prompt | completion | api
---|---|---
stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59
from __future__ import print_function
from __future__ import division
from collections import namedtuple
import logging
import numpy as np
from scipy.optimize import minimize
import open3d as o3
from . import features as ft
from . import cost_functions as cf
from .log import log
class L2DistRegistration(object):
"""L2 distance registration class
    This algorithm expresses point clouds as Gaussian mixture distributions and
    performs registration by minimizing the L2 distance between the two distributions.
    Args:
        source (numpy.ndarray): Source point cloud data.
        feature_gen (probreg.features.Feature): Generator of the Gaussian mixture distribution.
        cost_fn (probreg.cost_functions.CostFunction): Cost function to calculate the L2 distance.
        sigma (float, optional): Scaling parameter for the L2 distance.
        delta (float, optional): Annealing parameter for optimization.
        use_estimated_sigma (bool, optional): If this flag is True,
            sigma is estimated from the source point cloud.
"""
def __init__(self, source, feature_gen, cost_fn,
sigma=1.0, delta=0.9,
use_estimated_sigma=True):
self._source = source
self._feature_gen = feature_gen
self._cost_fn = cost_fn
self._sigma = sigma
self._delta = delta
self._use_estimated_sigma = use_estimated_sigma
self._callbacks = []
        if self._source is not None and self._use_estimated_sigma:
self._estimate_sigma(self._source)
def set_source(self, source):
self._source = source
if self._use_estimated_sigma:
self._estimate_sigma(self._source)
def set_callbacks(self, callbacks):
self._callbacks.extend(callbacks)
def _estimate_sigma(self, data):
ndata, dim = data.shape
        data_hat = data - np.mean(data, axis=0)
import numpy as np
import matplotlib.pyplot as plt
from os import makedirs
from os.path import isfile, exists
from scipy.constants import mu_0
# from numba import njit
def calcDipolMomentAnalytical(remanence, volume):
""" Calculating the magnetic moment from the remanence in T and the volume in m^3"""
m = remanence * volume / mu_0 # [A * m^2]
return m
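# Illustrative check of calcDipolMomentAnalytical (the magnet values below are
# assumptions for this example, not taken from the original script): a cubic
# NdFeB magnet with 1.35 T remanence and 12 mm edge length.
#
#   >>> calcDipolMomentAnalytical(1.35, 0.012**3)   # ~1.86 A*m^2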
def plotSimple(data, FOV, fig, ax, cbar=True, **args):
""" Generate simple colorcoded plot of 2D grid data with contour. Returns axes object."""
im = ax.imshow(data, extent=FOV, origin="lower", **args)
cs = ax.contour(data, colors="k", extent=FOV, origin="lower", linestyles="dotted")
class nf(float):
def __repr__(self):
s = f"{self:.1f}"
return f"{self:.0f}" if s[-1] == "0" else s
cs.levels = [nf(val) for val in cs.levels]
if plt.rcParams["text.usetex"]:
fmt = r"%r"
else:
fmt = "%r"
ax.clabel(cs, cs.levels, inline=True, fmt=fmt, fontsize=10)
    if cbar:
fig.colorbar(im, ax=ax)
return im
def centerCut(field, axis):
"""return a slice of the data at the center for the specified axis"""
dims = np.shape(field)
return np.take(field, indices=int(dims[axis] / 2), axis=axis)
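# Example (sketch; the array size and field of view are illustrative): plot the
# central xy-slice of a simulated 3D field using the helpers above.
#
#   >>> field = np.random.rand(41, 41, 41)
#   >>> fig, ax = plt.subplots()
#   >>> im = plotSimple(centerCut(field, 2), FOV=(-10, 10, -10, 10), fig=fig, ax=ax)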
def isHarmonic(field, sphericalMask, shellMask):
"""Checks if the extrema of the field are in the shell."""
fullField = np.multiply(field, sphericalMask) # [T]
reducedField = np.multiply(field, shellMask)
if int(ptpPPM(fullField)) > int(ptpPPM(reducedField)):
print(
"ptpPPM of field:",
ptpPPM(fullField),
"ptpPPM on surface",
ptpPPM(reducedField),
)
print("Masked field is NOT a harmonic function...")
return False
else:
print(
"ptpPPM of field:",
ptpPPM(fullField),
"ptpPPM on surface",
ptpPPM(reducedField),
)
print("Masked field is harmonic.")
sizeSpherical = int(np.nansum(sphericalMask))
sizeShell = int(np.nansum(shellMask))
print(
"Reduced size of field from {} to {} ({}%)".format(
sizeSpherical, sizeShell, int(100 * sizeShell / sizeSpherical)
)
)
return True
def genQmesh(field, resolution):
"""Generate a mesh of quadratic coordinates"""
mask = np.zeros(np.shape(field))
xAxis = np.linspace(
-(np.size(field, 0) - 1) * resolution / 2,
(np.size(field, 0) - 1) * resolution / 2,
np.size(field, 0),
)
yAxis = np.linspace(
-(np.size(field, 1) - 1) * resolution / 2,
(np.size(field, 1) - 1) * resolution / 2,
np.size(field, 1),
)
zAxis = np.linspace(
-(np.size(field, 2) - 1) * resolution / 2,
(np.size(field, 2) - 1) * resolution / 2,
np.size(field, 2),
)
xAxis, yAxis, zAxis = np.meshgrid(xAxis, yAxis, zAxis)
xAxisSquare = np.square(xAxis)
yAxisSquare = np.square(yAxis)
zAxisSquare = np.square(zAxis)
return mask, xAxisSquare, yAxisSquare, zAxisSquare
def genMask(
field, resolution, diameter=False, shellThickness=False, axis=False, debug=False
):
"""Generate a mask for a spherical shell"""
mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
if (shellThickness != False) and (diameter != False):
        if debug:
print(
"Creating shell mask. (resolution = {}, diameter = {}, shellThickness = {})".format(
resolution, diameter, shellThickness
)
)
print("The shell is added inside the sphere surface!")
rAxisSquare = xAxisSquare + yAxisSquare + zAxisSquare
innerRadiusSquare = (diameter / 2 - shellThickness) ** 2
outerRadiusSquare = (diameter / 2) ** 2
mask[
(rAxisSquare <= outerRadiusSquare) & (rAxisSquare >= innerRadiusSquare)
] = 1
mask[mask == 0] = "NaN"
return mask
def genSphericalMask(field, diameter, resolution):
"""generate spherical mask
with >>diameter<<
for a >>field<< and a given >>resolution<<
"""
mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
mask[xAxisSquare + yAxisSquare + zAxisSquare <= (diameter / 2) ** 2] = 1
mask[mask == 0] = "NaN"
return mask
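# Example (illustrative sizes): a 10 mm sphere sampled on a 41^3 grid at 0.5 mm
# resolution; voxels outside the sphere become NaN so that the np.nan*
# reductions used elsewhere in this module ignore them.
#
#   >>> field = np.zeros((41, 41, 41))
#   >>> mask = genSphericalMask(field, diameter=10, resolution=0.5)
#   >>> int(np.nansum(mask))   # number of voxels inside the sphere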
def genSliceMask(field, diameter, resolution, axis="x"):
"""generate mask for a circular slice
with >>diameter<<
for a >>field<< and a given >>resolution<<
Every input variable has to have the same unit (mm or m or ...)
"""
mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
if axis == "z":
mask[
(xAxisSquare + yAxisSquare <= (diameter / 2) ** 2) & (zAxisSquare == 0)
] = 1
if axis == "y":
mask[
(xAxisSquare + zAxisSquare <= (diameter / 2) ** 2) & (yAxisSquare == 0)
] = 1
if axis == "x":
mask[
(yAxisSquare + zAxisSquare <= (diameter / 2) ** 2) & (xAxisSquare == 0)
] = 1
mask[mask == 0] = "NaN"
return mask
def genEllipseSliceMask(field, a, b, resolution, axis="x"):
"""generate mask for a circulat slice
with >>diameter<<
for a >>field<< and a given >>resolution<<
Every input variable has to have the same unit (mm or m or ...)
"""
# generate spherical mask
mask, xAxisSquare, yAxisSquare, zAxisSquare = genQmesh(field, resolution)
if axis == "z":
mask[
(xAxisSquare / (a / 2) ** 2 + yAxisSquare / (b / 2) ** 2 <= 1)
& (zAxisSquare == 0)
] = 1
elif axis == "y":
mask[
(xAxisSquare / (a / 2) ** 2 + zAxisSquare / (b / 2) ** 2 <= 1)
& (yAxisSquare == 0)
] = 1
elif axis == "x":
mask[
(yAxisSquare / (a / 2) ** 2 + zAxisSquare / (b / 2) ** 2 <= 1)
& (xAxisSquare == 0)
] = 1
mask[mask == 0] = "NaN"
return mask
def ptpPPM(field):
"""Calculate the peak-to-peak homogeneity in ppm."""
return 1e6 * (np.nanmax(field) - np.nanmin(field)) / np.nanmean(field)
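# Example (illustrative numbers): a field spanning 1.000000 T to 1.000010 T
# around a ~1 T mean is about 10 ppm peak-to-peak.
#
#   >>> ptpPPM(np.array([1.000000, 1.000005, 1.000010]))   # ~10.0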
def saveParameters(parameters, folder):
"""Saving a dict to the file parameters.npy .
If the file exist it is beeing updated, if the parameters are not stored already.
__future__: Fix usecase: Some parameters are in dict which are identical to the
stored ones and some are new!
"""
try:
print("Saving parameters to file...", end=" ")
print("\x1b[6;30;42m", *parameters.keys(), "\x1b[0m", end=" ")
oldParameters = loadParameters(folder)
if parameters.items() <= oldParameters.items():
print(" ... the parameters are already saved and identical.")
elif set(parameters).issubset(
set(oldParameters)
): # here just keys are compared!
print(
" ...\x1b[6;37;41m"
+ " parameters are NOT saved. Other parameters are stored. Please cleanup! "
+ "\x1b[0m"
)
else:
oldParameters.update(parameters)
np.save(folder + "/parameters", oldParameters)
print(" ... added.")
    except (FileNotFoundError, AttributeError):
np.save(folder + "/parameters", parameters)
oldParameters = parameters
# print('The following parameters are currently stored:\n', *oldParameters.keys())
def loadParameters(folder):
return np.load(folder + "/parameters.npy", allow_pickle=True).item()
def loadParameter(key, folder):
return loadParameters(folder)[key]
def displayParameters(folder):
print(loadParameters(folder))
def createShimfieldsShimRingV2(
numMagnets=(32, 44),
rings=4,
radii=(0.074, 0.097),
zRange=(-0.08, -0.039, 0.039, 0.08),
resolution=1000,
kValue=2,
simDimensions=(0.04, 0.04, 0.04),
numRotations=2,
):
""" Calculating the magnetic field distributions for a single or multiple Halbach Rings.
This has to be multiplied with the magnetic moment amplitude of a magnet to get the real distribution
For every magnet position we set 4 different rotations: 0°, 45°, 90°, 135°. This has to be considered in the cost function
otherwise two magnets are placed in one position
resolution is the amount of sample points times data points in one dimension
"""
mu = mu_0
# positioning of the magnets in a circle
if len(zRange) == 2:
rings = np.linspace(zRange[0], zRange[1], rings)
elif rings == len(zRange):
rings = np.array(zRange)
else:
print("No clear definition how to place shims...")
rotation_elements = np.linspace(0, np.pi, numRotations, endpoint=False)
# create array to store field data
count = 0
if type(numMagnets) in (list, tuple):
totalNumMagnets = np.sum(numMagnets) * np.size(rings) * numRotations
else:
totalNumMagnets = numMagnets * np.size(rings) * numRotations * len(radii)
    print(totalNumMagnets, numMagnets, np.size(rings), np.size(numRotations))
from win32api import GetSystemMetrics
print("Width =", GetSystemMetrics(0))
print("Height =", GetSystemMetrics(1))
# -*- coding: utf-8 -*-
#
# This file is part of PyGaze - the open-source toolbox for eye tracking
#
# PyGazeAnalyser is a Python module for easily analysing eye-tracking data
# Copyright (C) 2014 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# Gaze Plotter
#
# Produces different kinds of plots that are generally used in eye movement
# research, e.g. heatmaps, scanpaths, and fixation locations as overlays of
# images.
#
# version 2 (02 Jul 2014)
__author__ = "<NAME>"
# native
import os
# external
import numpy
import matplotlib
from matplotlib import pyplot, image
from PIL import Image
# # # # #
# LOOK
# COLOURS
# all colours are from the Tango colourmap, see:
# http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines#Color_Palette
COLS = { "butter": [ '#fce94f',
'#edd400',
'#c4a000'],
"orange": [ '#fcaf3e',
'#f57900',
'#ce5c00'],
"chocolate": [ '#e9b96e',
'#c17d11',
'#8f5902'],
"chameleon": [ '#8ae234',
'#73d216',
'#4e9a06'],
"skyblue": [ '#729fcf',
'#3465a4',
'#204a87'],
"plum": [ '#ad7fa8',
'#75507b',
'#5c3566'],
"scarletred":[ '#ef2929',
'#cc0000',
'#a40000'],
"aluminium": [ '#eeeeec',
'#d3d7cf',
'#babdb6',
'#888a85',
'#555753',
'#2e3436'],
}
# # FONT
# FONT = {'family': 'Windows',
# 'size': 15}
# matplotlib.rc('font', **FONT)
# # # # #
# FUNCTIONS NEW
def draw_fixations_new(fix, dispsize, imagefile=None, durationsize=True, durationcolour=True, alpha=0.5, savefilename=None):
"""Draws circles on the fixation locations, optionally on top of an image,
with optional weigthing of the duration for circle size and colour
arguments
fixations - a list of fixation ending events from a single trial,
as produced by edfreader.read_edf, e.g.
edfdata[trialnr]['events']['Efix']
dispsize - tuple or list indicating the size of the display,
e.g. (1024,768)
keyword arguments
imagefile - full path to an image file over which the heatmap
is to be laid, or None for no image; NOTE: the image
may be smaller than the display size, the function
assumes that the image was presented at the centre of
the display (default = None)
durationsize - Boolean indicating whether the fixation duration is
to be taken into account as a weight for the circle
size; longer duration = bigger (default = True)
durationcolour - Boolean indicating whether the fixation duration is
to be taken into account as a weight for the circle
colour; longer duration = hotter (default = True)
    alpha        -    float between 0 and 1, indicating the transparency of
                    the heatmap, where 0 is completely transparent and 1
                    is completely opaque (default = 0.5)
savefilename - full path to the file in which the heatmap should be
saved, or None to not save the file (default = None)
returns
fig - a matplotlib.pyplot Figure instance, containing the
fixations
"""
# FIXATIONS
# fix = parse_fixations(fixations)
# IMAGE
fig, ax = draw_display(dispsize, imagefile=imagefile)
# CIRCLES
# duration weigths
if durationsize:
siz = 100 * (fix['dur']/30.0)
else:
siz = 100 * numpy.median(fix['dur']/30.0)
if durationcolour:
col = fix['dur']
else:
col = COLS['chameleon'][2]
# draw circles
ax.scatter(fix['x'], fix['y'], s=siz, c=col, marker='o', cmap='jet', alpha=alpha, zorder=2 )
for i, txt in enumerate( range(0, len(fix['x']) )):
ax.annotate(txt, xy= (fix['x'][i], fix['y'][i]), ha='center')
pyplot.plot(fix['x'],fix['y'], lw=2, zorder=1)
# ax.set_axis_bgcolor("lightslategray")
# FINISH PLOT
# invert the y axis, as (0,0) is top left on a display
ax.invert_yaxis()
# save the figure if a file name was provided
    if savefilename is not None:
fig.savefig(savefilename) #, transparent=True, edgecolor=None)
# pyplot.show()
return fig
# # # # #
# FUNCTIONS NEW
def draw_heatmap_new(fix, dispsize, imagefile=None, durationweight=True, alpha=0.5, savefilename=None):
"""Draws a heatmap of the provided fixations, optionally drawn over an
image, and optionally allocating more weight to fixations with a higher
duration.
arguments
fixations - a list of fixation ending events from a single trial,
as produced by edfreader.read_edf, e.g.
edfdata[trialnr]['events']['Efix']
dispsize - tuple or list indicating the size of the display,
e.g. (1024,768)
keyword arguments
imagefile - full path to an image file over which the heatmap
is to be laid, or None for no image; NOTE: the image
may be smaller than the display size, the function
assumes that the image was presented at the centre of
the display (default = None)
durationweight - Boolean indicating whether the fixation duration is
to be taken into account as a weight for the heatmap
intensity; longer duration = hotter (default = True)
    alpha        -    float between 0 and 1, indicating the transparency of
                    the heatmap, where 0 is completely transparent and 1
                    is completely opaque (default = 0.5)
savefilename - full path to the file in which the heatmap should be
saved, or None to not save the file (default = None)
returns
fig - a matplotlib.pyplot Figure instance, containing the
heatmap
"""
# FIXATIONS
# fix = parse_fixations(fixations)
# IMAGE
fig, ax = draw_display(dispsize, imagefile=imagefile)
# HEATMAP
# Gaussian
gwh = 200
gsdwh = gwh/6
gaus = gaussian(gwh,gsdwh)
# matrix of zeroes
strt = int(gwh/2)
heatmapsize = int(dispsize[1] + 2*strt), int(dispsize[0] + 2*strt)
# print(list(heatmapsize))
heatmap = numpy.zeros(heatmapsize, dtype=float)
# create heatmap
for i in range(0,len(fix['dur'])):
# get x and y coordinates
x = int(strt + fix['x'][i] - int(gwh/2))
y = int(strt + fix['y'][i] - int(gwh/2))
# correct Gaussian size if either coordinate falls outside of
# display boundaries
if (not 0 < x < dispsize[0]) or (not 0 < y < dispsize[1]):
hadj=[0,gwh];vadj=[0,gwh]
if 0 > x:
hadj[0] = abs(x)
x = 0
elif dispsize[0] < x:
hadj[1] = int(gwh - int(x-dispsize[0]))
if 0 > y:
vadj[0] = abs(y)
y = 0
elif dispsize[1] < y:
vadj[1] = int(gwh - int(y-dispsize[1]))
# add adjusted Gaussian to the current heatmap
try:
heatmap[y:y+vadj[1],x:x+hadj[1]] += gaus[vadj[0]:vadj[1],hadj[0]:hadj[1]] * fix['dur'][i]
except:
# fixation was probably outside of display
pass
else:
# add Gaussian to the current heatmap
heatmap[y:y+gwh,x:x+gwh] += gaus * fix['dur'][i]
# resize heatmap
heatmap = heatmap[strt:dispsize[1]+strt,strt:dispsize[0]+strt]
# remove zeros
lowbound = numpy.mean(heatmap[heatmap>0])
heatmap[heatmap<lowbound] = numpy.NaN
# draw heatmap on top of image
ax.imshow(heatmap, cmap='jet', alpha=alpha)
# FINISH PLOT
# invert the y axis, as (0,0) is top left on a display
ax.invert_yaxis()
# save the figure if a file name was provided
    if savefilename is not None:
fig.savefig(savefilename) #, facecolor='w', edgecolor=None)
# fig.show()
# pyplot.waitforbuttonpress()
return fig
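# # # # #
# NOTE: draw_fixations_new and draw_heatmap_new call a gaussian() helper that
# is not included in this excerpt. The sketch below shows one way such a helper
# could look (a gwh x gwh matrix holding a centred 2D Gaussian with standard
# deviation sd and peak value 1); the original PyGaze helper may differ in
# signature and normalisation.
def _gaussian_sketch(gwh, sd):
    """Return a gwh x gwh numpy array containing a centred 2D Gaussian."""
    xo = yo = gwh / 2
    coords = numpy.arange(gwh, dtype=float)
    xgrid, ygrid = numpy.meshgrid(coords, coords)
    return numpy.exp(-(((xgrid - xo) ** 2) / (2.0 * sd ** 2) +
                       ((ygrid - yo) ** 2) / (2.0 * sd ** 2)))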
# # # # #
# HELPER FUNCTIONS
def draw_display(dispsize, imagefile=None):
"""Returns a matplotlib.pyplot Figure and its axes, with a size of
dispsize, a black background colour, and optionally with an image drawn
onto it
arguments
dispsize - tuple or list indicating the size of the display,
e.g. (1024,768)
keyword arguments
imagefile - full path to an image file over which the heatmap
is to be laid, or None for no image; NOTE: the image
may be smaller than the display size, the function
assumes that the image was presented at the centre of
the display (default = None)
returns
fig, ax - matplotlib.pyplot Figure and its axes: field of zeros
with a size of dispsize, and an image drawn onto it
if an imagefile was passed
"""
# construct screen (black background)
screen = numpy.zeros((dispsize[1],dispsize[0],3), dtype='float32')
# if an image location has been passed, draw the image
    if imagefile is not None:
# check if the path to the image exists
if not os.path.isfile(imagefile):
raise Exception("ERROR in draw_display: imagefile not found at '%s'" % imagefile)
Image.open(imagefile).convert('RGB').save(imagefile)
# load image
img = image.imread(imagefile)
# flip image over the horizontal axis
# (do not do so on Windows, as the image appears to be loaded with
# the correct side up there; what's up with that? :/)
if not os.name == 'nt':
            img = numpy.flipud(img)
import numpy as np
from scipy.stats import expon
from pfb.opt.power_method import power_method
from pfb.opt.pcg import pcg
from pfb.opt.primal_dual import primal_dual
from pfb.operators.psi import DaskPSI
from pfb.operators.psf import PSF
from pfb.prox.prox_21 import prox_21
from pfb.utils.fits import save_fits
from pfb.utils.misc import Gaussian2D
import pyscilog
log = pyscilog.get_logger('SARA')
def resid_func(x, dirty, hessian, mask, beam, wsum):
"""
Returns the unattenuated residual
"""
residual = dirty - hessian(mask(beam(x)))/wsum
    residual_mfs = np.sum(residual, axis=0)
from stats import *
import pandas as pd
import numpy as np
import copy
from scipy.stats import t
class Regression():
def __init__(self):
self.stats = Stats()
def regress(self, reg_name, data, y_name, beta_names, min_val = 0,
max_val = None, constant = True):
        self.min_val = min_val
        if max_val is not None:
self.max_val = max_val
else:
self.max_val = len(data)
self.reg_name = reg_name
self.data = copy.copy(data)
self.y_name = y_name
self.beta_names = copy.copy(beta_names)
if constant:
self.add_constant()
self.build_matrices()
self.calculate_regression_stats()
self.build_summary()
def add_constant(self):
self.data["Constant"] = 1
self.beta_names.append("Constant")
def build_matrices(self):
# Transform dataframes to matrices
self.y = np.matrix(self.data[self.y_name]\
[self.min_val:self.max_val]).getT()
# create standard array of X values
self.X = self.data[self.beta_names].values
        # convert the array of X values to a numpy matrix
self.X = np.matrix(self.X)
self.X_transpose = np.matrix(self.X).getT()
#(X'X)^-1
X_transp_X = np.matmul(self.X_transpose, self.X)
X_transp_X_Inv = X_transp_X.getI()
#X'Y
X_transp_y = np.matmul(self.X_transpose, self.y)
        self.Betas = np.matmul(X_transp_X_Inv, X_transp_y)
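        # self.Betas now holds the ordinary least squares estimate
        # beta_hat = (X'X)^-1 X'y, one coefficient per column of X
        # (including the constant term when constant=True).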
"""Test functionality related to glm module."""
import os
import pathlib
import datetime
import logging
from unittest.mock import patch, call, MagicMock
import numpy
import pandas
import pytest
from .conftest import _mk_test_files
from . import utils
def test_get_basedir(tmp_path, monkeypatch):
"""Test getting the GLM basedir."""
from sattools.glm import get_dwd_glm_basedir
monkeypatch.setenv("NAS_DATA", str(tmp_path / "nas"))
for m in ("C", "F"):
d = get_dwd_glm_basedir(m)
assert d == pathlib.Path(tmp_path / "nas" / "GLM-processed" /
f"{m:s}" / "1min")
for m in ("M1", "M2"):
d = get_dwd_glm_basedir(m, lat=45, lon=-55.3)
assert d == pathlib.Path(tmp_path / "nas" / "GLM-processed" /
f"{m:s}" / "45.0_-55.3" / "1min")
with pytest.raises(ValueError):
d = get_dwd_glm_basedir("invalid")
def test_get_pattern(tmp_path, monkeypatch):
"""Test getting GLM pattern."""
from sattools.glm import get_pattern_dwd_glm
monkeypatch.setenv("NAS_DATA", str(tmp_path / "nas"))
for m in "CF":
p = get_pattern_dwd_glm(m)
assert p == str(pathlib.Path(
tmp_path / "nas" / "GLM-processed" / f"{m:s}" / "1min"
/ "{year}/{month}/{day}/{hour}/"
f"OR_GLM-L2-GLM{m:s}-M3_G16_"
"s{year}{doy}{hour}{minute}{second}*_"
"e{end_year}{end_doy}{end_hour}{end_minute}{end_second}*_c*.nc"))
for i in (1, 2):
# NB: until the fix for
# https://github.com/deeplycloudy/glmtools/issues/73 the output
# filenames always show M1 as the sector
p = get_pattern_dwd_glm(f"M{i:d}", lat=45, lon=-55.3)
assert p == str(pathlib.Path(
tmp_path / "nas" / "GLM-processed" / f"M{i:d}" / "45.0_-55.3" /
"1min" / "{year}/{month}/{day}/{hour}/"
"OR_GLM-L2-GLMM1-M3_G16_"
"s{year}{doy}{hour}{minute}{second}*_"
"e{end_year}{end_doy}{end_hour}{end_minute}{end_second}*_c*.nc"))
@patch("appdirs.user_cache_dir")
@patch("s3fs.S3FileSystem")
def test_ensure_glm_lcfa(sS, au, lcfa_pattern, lcfa_files, tmp_path, caplog,
monkeypatch):
"""Test ensuring GLM LCFA is created."""
from sattools.glm import ensure_glm_lcfa_for_period
from fsspec.implementations.local import LocalFileSystem
from typhon.files.fileset import NoFilesError
monkeypatch.setenv("NAS_DATA", str(tmp_path / "nas"))
au.return_value = str(tmp_path / "whole-file-cache")
sS.return_value = LocalFileSystem()
with patch("sattools.glm.pattern_s3_glm_lcfa", lcfa_pattern):
# test that I'm raising a FileNotFoundError if unexpectedly no file
# created where expected
with patch("fsspec.implementations.cached.WholeFileCacheFileSystem"):
with pytest.raises(FileNotFoundError):
for _ in ensure_glm_lcfa_for_period(
datetime.datetime(1900, 1, 1, 0, 0, 0),
datetime.datetime(1900, 1, 1, 0, 6, 0)):
pass
with caplog.at_level(logging.DEBUG):
files = list(ensure_glm_lcfa_for_period(
datetime.datetime(1900, 1, 1, 0, 0, 0),
datetime.datetime(1900, 1, 1, 0, 6, 0)))
assert (f"Downloading {tmp_path!s}/lcfa-fake/"
f"lcfa-fake-19000101000000-000100.nc" in caplog.text)
assert (f"Writing to {tmp_path!s}/"
f"whole-file-cache/lcfa-fake-19000101000000-000100.nc" in
caplog.text)
assert len(files) == 6
assert files == [
pathlib.Path(
tmp_path / "whole-file-cache" /
f"lcfa-fake-1900010100{m:>02d}00-00{m+1:>02d}00.nc")
for m in range(6)]
for f in files:
assert f.exists()
files = list(ensure_glm_lcfa_for_period(
datetime.datetime(1900, 1, 1, 0, 1, 0),
datetime.datetime(1900, 1, 1, 0, 2, 0)))
assert len(files) == 1
assert files == [
pathlib.Path(
tmp_path / "whole-file-cache" /
"lcfa-fake-19000101000100-000200.nc")]
with pytest.raises(NoFilesError):
next(ensure_glm_lcfa_for_period(
datetime.datetime(1900, 1, 2, 0, 0, 0),
datetime.datetime(1900, 1, 2, 0, 1, 0)))
@patch("sattools.glm.run_glmtools")
@patch("appdirs.user_cache_dir")
@patch("s3fs.S3FileSystem")
def test_ensure_glm(sS, au, sgr, glm_files, lcfa_pattern,
lcfa_files, tmp_path, monkeypatch):
"""Test ensuring GLM GLMC is calculated."""
from sattools.glm import ensure_glm_for_period
from sattools.glm import get_pattern_dwd_glm
from fsspec.implementations.local import LocalFileSystem
monkeypatch.setenv("NAS_DATA", str(tmp_path / "nas"))
au.return_value = str(tmp_path / "whole-file-cache")
sS.return_value = LocalFileSystem()
with patch("sattools.glm.pattern_s3_glm_lcfa", lcfa_pattern):
with pytest.raises(RuntimeError): # files not created when testing
next(ensure_glm_for_period(
datetime.datetime(1900, 1, 1, 0, 0, 0),
datetime.datetime(1900, 1, 1, 0, 6, 0),
sector="C"))
sgr.assert_has_calls(
[call([tmp_path / "whole-file-cache" /
f"lcfa-fake-1900010100{m:>02d}00-00{m+1:>02d}00.nc"],
max_files=60,
sector="C")
for m in (2, 4)])
def fake_run(files, max_files, sector="C", lat=None, lon=None):
"""Create files when testing."""
_mk_test_files(get_pattern_dwd_glm(sector, lat=lat, lon=lon),
(0, 1, 2, 3, 4, 5, 6))
sgr.side_effect = fake_run
g = ensure_glm_for_period(
datetime.datetime(1900, 1, 1, 0, 0, 0),
datetime.datetime(1900, 1, 1, 0, 6, 0),
sector="C")
fi = next(g)
assert isinstance(fi, str)
assert os.fspath(fi) == os.fspath(
tmp_path / "nas" / "GLM-processed" / "C" /
"1min" / "1900" / "01" / "01" / "00" /
"OR_GLM-L2-GLMC-M3_G16_s1900001000000*_e1900001000100*_c*.nc")
g = ensure_glm_for_period(
datetime.datetime(1900, 1, 1, 0, 0, 0),
datetime.datetime(1900, 1, 1, 0, 6, 0),
sector="M1",
lat=10,
lon=20)
fi = next(g)
assert os.fspath(fi) == os.fspath(
tmp_path / "nas" / "GLM-processed" / "M1" / "10.0_20.0" /
"1min" / "1900" / "01" / "01" / "00" /
"OR_GLM-L2-GLMM1-M3_G16_s1900001000000*_e1900001000100*_c*.nc")
def test_find_coverage(glm_files, tmp_path, monkeypatch):
"""Test finding GLM time coverage."""
from sattools.glm import find_glm_coverage
monkeypatch.setenv("NAS_DATA", str(tmp_path / "nas"))
covered = list(find_glm_coverage(
datetime.datetime(1900, 1, 1, 0, 0, 0),
datetime.datetime(1900, 1, 1, 0, 6, 0),
sector="C"))
pI = pandas.Interval
pT = pandas.Timestamp
assert covered == [
pI(pT("1900-01-01T00:00:00"), pT("1900-01-01T00:01:00")),
pI(pT("1900-01-01T00:01:00"), pT("1900-01-01T00:02:00")),
pI(pT("1900-01-01T00:03:00"), pT("1900-01-01T00:04:00")),
pI(pT("1900-01-01T00:05:00"), pT("1900-01-01T00:06:00"))]
covered = list(find_glm_coverage(
datetime.datetime(1900, 1, 2, 3, 4, 5),
datetime.datetime(1900, 5, 4, 3, 2, 1),
sector="C"))
assert covered == []
covered = list(find_glm_coverage(
datetime.datetime(1900, 1, 1, 0, 0, 0),
datetime.datetime(1900, 1, 1, 0, 6, 0),
sector="F"))
assert covered == [
pI(pT("1900-01-01T00:00:00"), pT("1900-01-01T00:01:00")),
pI(pT("1900-01-01T00:02:00"), pT("1900-01-01T00:03:00")),
pI(pT("1900-01-01T00:05:00"), pT("1900-01-01T00:06:00"))]
covered = list(find_glm_coverage(
datetime.datetime(1900, 1, 1, 0, 0, 0),
datetime.datetime(1900, 1, 1, 0, 9, 0),
sector="M1", lat=1.2, lon=2.3))
assert covered == [
pI(pT("1900-01-01T00:00:00"), pT("1900-01-01T00:01:00")),
pI(pT("1900-01-01T00:05:00"), pT("1900-01-01T00:06:00")),
pI(pT("1900-01-01T00:08:00"), pT("1900-01-01T00:09:00"))]
covered = list(find_glm_coverage(
datetime.datetime(1900, 1, 1, 0, 0, 0),
datetime.datetime(1900, 1, 1, 0, 9, 0),
sector="M2", lat=1.2, lon=2.3))
assert covered == [
pI(pT("1900-01-01T00:00:00"), pT("1900-01-01T00:01:00")),
pI(pT("1900-01-01T00:02:00"), pT("1900-01-01T00:03:00")),
pI(pT("1900-01-01T00:04:00"), pT("1900-01-01T00:05:00"))]
def test_find_gaps(glm_files, monkeypatch, tmp_path):
"""Test finding GLM time coverage gaps."""
from sattools.glm import find_glm_coverage_gaps
monkeypatch.setenv("NAS_DATA", str(tmp_path / "nas"))
pI = pandas.Interval
pT = pandas.Timestamp
gaps = list(find_glm_coverage_gaps(
datetime.datetime(1900, 1, 1, 0, 0),
datetime.datetime(1900, 1, 1, 0, 8),
sector="C"))
assert gaps == [
pI(pT("1900-01-01T00:02:00"), pT("1900-01-01T00:03:00")),
pI(pT("1900-01-01T00:04:00"), pT("1900-01-01T00:05:00")),
pI(pT("1900-01-01T00:06:00"), pT("1900-01-01T00:08:00"))]
gaps = list(find_glm_coverage_gaps(
datetime.datetime(1900, 1, 2, 0, 0),
datetime.datetime(1900, 1, 2, 0, 8),
sector="C"))
assert gaps == [
pI(pT("1900-01-02T00:00:00"), pT("1900-01-02T00:08:00"))]
gaps = list(find_glm_coverage_gaps(
datetime.datetime(1900, 1, 1, 0, 0),
datetime.datetime(1900, 1, 1, 0, 2),
sector="C"))
assert gaps == []
gaps = list(find_glm_coverage_gaps(
datetime.datetime(1900, 1, 1, 0, 0),
datetime.datetime(1900, 1, 1, 0, 8),
sector="F"))
assert gaps == [
pI(pT("1900-01-01T00:01:00"), pT("1900-01-01T00:02:00")),
pI(pT("1900-01-01T00:03:00"), pT("1900-01-01T00:05:00")),
pI(pT("1900-01-01T00:06:00"), pT("1900-01-01T00:08:00"))]
gaps = list(find_glm_coverage_gaps(
datetime.datetime(1900, 1, 1, 0, 0),
datetime.datetime(1900, 1, 1, 0, 10),
sector="M1", lat=1.2, lon=2.3))
assert gaps == [
pI(pT("1900-01-01T00:01:00"), pT("1900-01-01T00:05:00")),
pI(pT("1900-01-01T00:06:00"), pT("1900-01-01T00:08:00")),
pI(pT("1900-01-01T00:09:00"), pT("1900-01-01T00:10:00"))]
gaps = list(find_glm_coverage_gaps(
datetime.datetime(1900, 1, 1, 0, 0),
datetime.datetime(1900, 1, 1, 0, 10),
sector="M2", lat=1.2, lon=2.3))
assert gaps == [
pI(pT("1900-01-01T00:01:00"), pT("1900-01-01T00:02:00")),
pI(pT("1900-01-01T00:03:00"), pT("1900-01-01T00:04:00")),
pI(pT("1900-01-01T00:05:00"), pT("1900-01-01T00:10:00"))]
def test_run_glmtools(tmp_path, caplog, monkeypatch):
"""Test running glmtools."""
from sattools.glm import run_glmtools
monkeypatch.setenv("NAS_DATA", str(tmp_path / "nas"))
with patch("sattools.glm.load_file") as sgl:
mocks = [MagicMock() for _ in range(5)]
sgl.return_value.grid_setup.return_value = mocks
with caplog.at_level(logging.INFO):
run_glmtools(
[tmp_path / "lcfa1.nc", tmp_path / "lcfa2.nc"],
sector="F")
assert (f"Running glmtools for {(tmp_path / 'lcfa1.nc')!s} "
f"{(tmp_path / 'lcfa2.nc')!s}" in caplog.text)
mocks[0].assert_called_once()
# confirm we passed the correct sector
assert (cal := sgl().create_parser().parse_args.call_args_list
[0][0][0])[
cal.index("--goes_sector") + 1] == "full"
# try with meso sector, requiring lat/lon
run_glmtools(
[tmp_path / "lcfa1.nc", tmp_path / "lcfa2.nc"],
sector="M1", lat=45, lon=-120)
assert (cal := sgl().create_parser().parse_args.call_args_list
[-1][0][0])[
cal.index("--goes_sector") + 1] == "meso"
assert cal[cal.index("--ctr_lat") + 1] == "45.00"
assert cal[cal.index("--ctr_lon") + 1] == "-120.00"
# try with splitting
mocks[0].reset_mock()
run_glmtools([tmp_path / "lcfa1.nc", tmp_path / "lcfa2.nc"],
max_files=1)
assert mocks[0].call_count == 2
@patch("importlib.util.spec_from_file_location", autospec=True)
@patch("importlib.util.module_from_spec", autospec=True)
def test_load_file(ium, ius):
"""Test loading file as module."""
from sattools.glm import load_file
load_file("module", "/dev/null")
def test_get_integrated_glm(tmp_path):
"""Test getting integrated GLM."""
from sattools.glm import get_integrated_scene
fake_glm = utils.create_fake_glm_for_period(
tmp_path,
datetime.datetime(1900, 1, 1, 0, 0, 0),
datetime.datetime(1900, 1, 1, 0, 4, 0),
"C")
sc = get_integrated_scene(fake_glm)
numpy.testing.assert_array_equal(
sc["flash_extent_density"],
        numpy.full((10, 10), 5))
from time import time
import numpy as np
import lptml
import itertools
import csv
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
import pandas as pd
import scipy.io as sio
run_mlwga = False
previous_solution = []
def split_pca_learn_metric(x, y, PCA_dim_by, repetitions, t_size, lptml_iterations, S, D, ut, lt, run_hadoop=False, num_machines=10, label_noise=0, rand_state=-1):
experiment_results = {}
global previous_solution
d = len(x[0])
if rand_state < 0:
ss = ShuffleSplit(test_size=1-t_size, n_splits=repetitions)
else:
ss = ShuffleSplit(test_size=1 - t_size, n_splits=repetitions, random_state=rand_state)
for train_index, test_index in ss.split(x):
x_train, x_test = x[train_index], x[test_index]
y_train, y_test = y[train_index], y[test_index]
# add label noise by re-sampling a fraction of the training labels
if label_noise > 0:
all_labels = np.unique(y_train)
np.random.seed(rand_state)
nss = ShuffleSplit(test_size=label_noise/100, n_splits=1, random_state=rand_state)
for no_noise, yes_noise in nss.split(y_train):
for i in yes_noise:
y_train[i] = np.random.choice(np.setdiff1d(all_labels, y_train[i]), 1);
np.random.seed(None)
for reduce_dim_by in PCA_dim_by:
print("Reducing dimension by", reduce_dim_by)
dimensions = d - reduce_dim_by
if reduce_dim_by > 0:
pca = PCA(n_components=dimensions)
x_pca_train = pca.fit(x_train).transform(x_train)
x_pca_test = pca.fit(x_test).transform(x_test)
else:
x_pca_train = x_train
x_pca_test = x_test
if (ut == 0) and (lt == 0):
distances = []
all_pairs = []
for pair_of_indexes in itertools.combinations(range(0, min(1000, len(x_pca_train))), 2):
all_pairs.append(pair_of_indexes)
distances.append(np.linalg.norm(x_pca_train[pair_of_indexes[0]] - x_pca_train[pair_of_indexes[1]]))
u = np.percentile(distances, 10)
l = np.percentile(distances, 90)
else:
u = ut
l = lt
previous_solution = []
# replace use of d by dim from here
previous_t = 0
for target_iteration in lptml_iterations:
t = target_iteration - previous_t
previous_t = t
print("Algorithm t=", lptml_iterations)
if str(reduce_dim_by) not in experiment_results.keys():
experiment_results[str(reduce_dim_by)] = {str(target_iteration): []}
else:
if str(target_iteration) not in experiment_results[str(reduce_dim_by)].keys():
experiment_results[str(reduce_dim_by)][str(target_iteration)] = []
iteration_results = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
print('u', u, 'l', l)
start_time = time()
if run_mlwga:
if len(S) > 0:
sim = []
dis = []
# select those pairs in S & D that are in the training set
for j in range(len(S)):
if ((S[j][0] - 1) in train_index) and ((S[j][1] - 1) in train_index):
# print("here", np.where(train_index == S[j][0]))
sim.append([np.where(train_index == (S[j][0] - 1))[0][0], np.where(train_index == (S[j][1] - 1))[0][0]])
# print(S[j])
for j in range(len(D)):
if ((D[j][0] - 1) in train_index) and ((D[j][1] - 1) in train_index):
# print(train_index)
dis.append([np.where(train_index == (D[j][0] - 1))[0][0], np.where(train_index == (D[j][1] - 1))[0][0]])
# print(D[j])
G = lptml.fit(x_pca_train, y_train, u, l, t, sim, dis, run_hadoop=run_hadoop, num_machines=num_machines, initial_solution=previous_solution)
else:
G = lptml.fit(x_pca_train, y_train, u, l, t, run_hadoop=run_hadoop, num_machines=num_machines, initial_solution=previous_solution, random_seed=rand_state)
previous_solution = np.dot(np.transpose(G), G)
else:
G = np.identity(len(x_pca_train[1]))
elapsed_time = time() - start_time
print("elapsed time to get G", elapsed_time)
# x_lptml = np.matmul(G, x.T).T
print("what I got back was of type", type(G))
# x_lptml_train, x_lptml_test = x_lptml[train_index], x_lptml[test_index]
try:
x_lptml_train = np.matmul(G, np.transpose(x_pca_train)).T
x_lptml_test = np.matmul(G, np.transpose(x_pca_test)).T
except:
print("continue")
raise
neigh_lptml = KNeighborsClassifier(n_neighbors=4, metric="euclidean")
neigh_lptml.fit(x_lptml_train, np.ravel(y_train))
neigh = KNeighborsClassifier(n_neighbors=4, metric="euclidean")
neigh.fit(x_pca_train, np.ravel(y_train))
y_prediction = neigh.predict(x_pca_test)
y_lptml_prediction = neigh_lptml.predict(x_lptml_test)
iteration_results[0] = accuracy_score(y_test, y_prediction)
iteration_results[1] = accuracy_score(y_test, y_lptml_prediction)
iteration_results[4] = precision_score(y_test, y_prediction, average="macro")
iteration_results[5] = precision_score(y_test, y_lptml_prediction, average="macro")
iteration_results[8] = recall_score(y_test, y_prediction, average="macro")
iteration_results[9] = recall_score(y_test, y_lptml_prediction, average="macro")
iteration_results[12] = f1_score(y_test, y_prediction, average="macro")
iteration_results[13] = f1_score(y_test, y_lptml_prediction, average="macro")
iteration_results[16] = lptml.initial_violation_count
iteration_results[17] = lptml.max_best_solution_d + lptml.max_best_solution_s #violated constraints
d_viol, s_viol = lptml.count_violated_constraints(x_pca_test, y_test, lptml.transformer(np.identity(dimensions)), u, l)
iteration_results[18] = d_viol + s_viol
d_viol, s_viol = lptml.count_violated_constraints(x_pca_test, y_test, G, u, l)
iteration_results[19] = d_viol + s_viol
iteration_results[20] = elapsed_time
print(iteration_results)
experiment_results[str(reduce_dim_by)][str(target_iteration)].append(iteration_results)
return experiment_results
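# The dictionary returned above is keyed first by the number of PCA dimensions
# removed and then by the lptml iteration budget, e.g.
# experiment_results["2"]["10"] is a list with one 21-element row per
# train/test split (accuracy, precision, recall and F1 for the Euclidean and
# lptml metrics, violated-constraint counts, and the time spent learning G).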
def perform_experiment(x, y, number_of_folds, feat_count, PCA_dim_by, repeat_experiment, result_header, filename, lptml_iterations, S, D, ut, lt, run_hadoop=False, num_machines=10, label_noise=0, rand_state=-1):
results_dict = split_pca_learn_metric(x, y, PCA_dim_by, repeat_experiment, number_of_folds, lptml_iterations, S, D, ut, lt, run_hadoop=run_hadoop, num_machines=num_machines, label_noise=label_noise, rand_state=rand_state)
for pca in PCA_dim_by:
for ite in lptml_iterations:
final_results = ["", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
results = np.array(results_dict[str(pca)][str(ite)])
if pca == 0:
final_results[0] = result_header + " NOPCA"
else:
final_results[0] = result_header + " to " + str(feat_count - pca)
final_results[0] += " t=" + str(ite)
# Averages accuracy for Euclidean, lptml, LMNN, ITML
final_results[1] = np.round(np.average(results[:, 0]), 2)
final_results[2] = np.round(np.average(results[:, 1]), 2)
# Std accuracy for Euclidean, lptml, LMNN, ITML
final_results[3] = np.round(np.std(results[:, 0]), 2)
final_results[4] = np.round(np.std(results[:, 1]), 2)
# Averages precision for Euclidean, lptml, LMNN, ITML
final_results[5] = np.round(np.average(results[:, 4]), 2)
final_results[6] = np.round(np.average(results[:, 5]), 2)
# Std precision for Euclidean, lptml, LMNN, ITML
final_results[7] = np.round(np.std(results[:, 4]), 2)
final_results[8] = np.round(np.std(results[:, 5]), 2)
# Averages recall for Euclidean, lptml, LMNN, ITML
final_results[9] = np.round(np.average(results[:, 8]), 2)
final_results[10] = np.round(np.average(results[:, 9]), 2)
# Std recall for Euclidean, lptml, LMNN, ITML
final_results[11] = np.round(np.std(results[:, 8]), 2)
final_results[12] = np.round(np.std(results[:, 9]), 2)
# Averages F1 score for Euclidean, lptml, LMNN, ITML
final_results[13] = np.round(np.average(results[:, 12]), 2)
final_results[14] = np.round(np.average(results[:, 13]), 2)
# Std F1 score for Euclidean, lptml, LMNN, ITML
final_results[15] = np.round(np.std(results[:, 12]), 2)
final_results[16] = np.round(np.std(results[:, 13]), 2)
# Train initial # violated
final_results[17] = np.round(np.average(results[:, 16]), 2)
final_results[18] = np.round(np.std(results[:, 16]), 2)
# Train final # violated
final_results[19] = np.round(np.average(results[:, 17]), 2)
final_results[20] = np.round(np.std(results[:, 17]), 2)
# Test initial # violated
final_results[21] = np.round(np.average(results[:, 18]), 2)
final_results[22] = np.round(np.std(results[:, 18]), 2)
# Test final # violated
            final_results[23] = np.round(np.average(results[:, 19]), 2)
# Plot the boxplots for 4000 unmethylated & methylated CpGs each, analysed by old (Nanopolish) & new (NN events) method
# with their 5 values and see if there is a difference:
# Organise your data in the style of 100:5 matrix
# (100 rows corresponding to individual CpG & 5 columns corresponding to 5 signal data points per CpG)
un_old = [[] for i in range(5)]
for CpG in open("/Users/kristinaulicna/Documents/Rotation_1/Archive/DRONE/UnmethOldNoNone.txt"):
CpG = CpG.strip().split('\t')
CpG = [float(i) for i in CpG[6:11]]
index = 0
for number in CpG:
un_old[index].append(CpG[index])
index += 1
print ("Un_Old:", type(un_old[0][0]), un_old[0][0], un_old[1][0], un_old[2][0], un_old[3][0], un_old[4][0])
un_new = [[] for i in range(5)]
for CpG in open("/Users/kristinaulicna/Documents/Rotation_1/Archive/DRONE/UnmethNewNoNone.txt"):
CpG = CpG.strip().split('\t')
CpG = [float(i) for i in CpG[6:11]]
index = 0
for number in CpG:
un_new[index].append(CpG[index])
index += 1
print ("Un_New:", type(un_new[0][0]), un_new[0][0], un_new[1][0], un_new[2][0], un_new[3][0], un_new[4][0])
me_old = [[] for i in range(5)]
for CpG in open("/Users/kristinaulicna/Documents/Rotation_1/Archive/DRONE/MethOldNoNone.txt"):
CpG = CpG.strip().split('\t')
CpG = [float(i) for i in CpG[6:11]]
index = 0
for number in CpG:
me_old[index].append(CpG[index])
index += 1
print ("Me_Old:", type(me_old[0][0]), me_old[0][0], me_old[1][0], me_old[2][0], me_old[3][0], me_old[4][0])
me_new = [[] for i in range(5)]
for CpG in open("/Users/kristinaulicna/Documents/Rotation_1/Archive/DRONE/MethNewNoNone.txt"):
CpG = CpG.strip().split('\t')
CpG = [float(i) for i in CpG[6:11]]
index = 0
for number in CpG:
me_new[index].append(CpG[index])
index += 1
print ("Me_New:", type(me_new[0][0]), me_new[0][0], me_new[1][0], me_new[2][0], me_new[3][0], me_new[4][0])
print ("\nDone!")
#--------- Prepare the data:
import numpy as np
boxplot_datasize = 1500
all_lists = [un_old, un_new, me_old, me_new]
for single_list in all_lists:
index = 0
while index <= 4:
single_list[index] = single_list[index][0:boxplot_datasize]
index += 1
un_old = all_lists[0]
un_new = all_lists[1]
me_old = all_lists[2]
me_new = all_lists[3]
print ()
print ("Un_Old:", type(un_old[0][0]), len(un_old[0]), len(un_old[1]), len(un_old[2]), len(un_old[3]), len(un_old[4]), un_old[0][0], un_old[1][0], un_old[2][0], un_old[3][0], un_old[4][0])
print ("Un_New:", type(un_new[0][0]), len(un_new[0]), len(un_new[1]), len(un_new[2]), len(un_new[3]), len(un_new[4]), un_new[0][0], un_new[1][0], un_new[2][0], un_new[3][0], un_new[4][0])
print ("Me_Old:", type(me_old[0][0]), len(me_old[0]), len(me_old[1]), len(me_old[2]), len(me_old[3]), len(me_old[4]), me_old[0][0], me_old[1][0], me_old[2][0], me_old[3][0], me_old[4][0])
print ("Me_New:", type(me_new[0][0]), len(me_new[0]), len(me_new[1]), len(me_new[2]), len(me_new[3]), len(me_new[4]), me_new[0][0], me_new[1][0], me_new[2][0], me_new[3][0], me_new[4][0])
mean_un_old = [np.mean(un_old[i]) for i in range(5)]
mean_me_old = [np.mean(me_old[i]) for i in range(5)]
mean_un_new = [np.mean(un_new[i]) for i in range(5)]
mean_me_new = [np.mean(me_new[i]) for i in range(5)]
print ("\nMean_Un_Old:", mean_un_old, "\t", "Mean_Me_Old:", mean_me_old, "\t", "Mean_Un_New:", mean_un_new, "\t", "Mean_Me_New:", mean_me_new)
print ("Mean_Un_New:", mean_un_new)
print ("Shift:", | np.mean(mean_un_new) | numpy.mean |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import argparse
from scipy.stats import gamma
from scipy.optimize import minimize,fmin_l_bfgs_b
#import autograd.numpy as np
#from autograd import grad, jacobian, hessian
def measure_sensitivity(X):
N = len(X)
Ds = 1/N * (np.abs(np.max(X) - np.min(X)))
return(Ds)
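# Example (illustrative numbers): for N = 100 samples clipped to [0, 10] the
# sensitivity of the sample mean is at most (10 - 0) / 100 = 0.1, so a Laplace
# mechanism at privacy level eps would add noise with scale 0.1 / eps.
#
#   >>> measure_sensitivity(np.clip(np.random.poisson(5, size=100), 0, 10))  # <= 0.1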
def measure_sensitivity_private(distribution, N, theta_vector, cliplo, cliphi):
#computed on a surrogate sample different than the one being analyzed
if distribution == 'poisson':
theta = theta_vector[0]
if cliphi == np.inf:
Xprime = np.random.poisson(theta, size=1000)
Xmax, Xmin = np.max(Xprime), np.min(Xprime)
else:
Xmax, Xmin = cliphi, cliplo
Ds = 1/N * (np.abs(Xmax - Xmin))
if distribution == 'gaussian':
theta, sigma = theta_vector[0], theta_vector[1]
if cliphi == np.inf:
Xprime = np.random.normal(theta, sigma, size=1000)
Xmax, Xmin = np.max(Xprime), np.min(Xprime)
else:
Xmax, Xmin = cliphi, cliplo
Ds = 1/N * (np.abs(Xmax - Xmin))
if distribution == 'gaussian2':
theta, sigma = theta_vector[0], theta_vector[1]
if cliphi == np.inf:
Xprime = np.random.normal(theta, sigma, size=1000)
Xmax, Xmin = np.max(Xprime), np.min(Xprime)
else:
Xmax, Xmin = cliphi, cliplo
Ds1 = 1/N * (np.abs(Xmax - Xmin))
Ds2 = 2/N * (np.abs(Xmax - Xmin))
Ds = [Ds1, Ds2]
if distribution == 'gamma':
theta, theta2 = theta_vector[0], theta_vector[1]
if cliphi == np.inf:
Xprime = np.random.gamma(theta2, theta, size=1000)
Xmax, Xmin = np.max(Xprime), np.min(Xprime)
else:
Xmax, Xmin = cliphi, cliplo
Ds = 1/N * (np.abs(Xmax - Xmin))
if distribution == 'gaussianMV':
theta, theta2 = theta_vector[0], theta_vector[1]
K = len(theta)
Xprime = np.random.multivariate_normal(theta, theta2, size=1000)
Xmax, Xmin = np.max(Xprime, axis=0), np.min(Xprime,axis=0)
Ds = 1/N * (np.abs(Xmax.T-Xmin.T))
return(Ds, [Xmin, Xmax])
def A_SSP(X, Xdistribution, privately_computed_Ds, laplace_noise_scale, theta_vector, rho):
N = len(X)
if Xdistribution == 'poisson':
s = 1/N * np.sum(X)
z = np.random.laplace(loc=s, scale=privately_computed_Ds/laplace_noise_scale, size = 1)
theta_hat_given_s = s
theta_hat_given_z = z
return({'0priv': theta_hat_given_z, '0basic': theta_hat_given_s})
if Xdistribution == 'gaussian':
s = 1/N * np.sum(X)
z = np.random.laplace(loc=s, scale=privately_computed_Ds/laplace_noise_scale, size = 1)
theta_hat_given_s = s
theta_hat_given_z = z
return({'0priv': theta_hat_given_z, '0basic': theta_hat_given_s})
if Xdistribution == 'gaussian2':
s1 = 1/N * np.sum(X)
s2 = 1/N * np.sum(np.abs(X-s1)) # see du et al 2020
z1 = np.random.laplace(loc=s1, scale=privately_computed_Ds[0]/(laplace_noise_scale*rho), size = 1)
z2 = np.random.laplace(loc=s2, scale=privately_computed_Ds[1]/(laplace_noise_scale*(1-rho)), size = 1)
theta_hat_given_s = s1
theta2_hat_given_s = np.sqrt(np.pi/2) * max(0.00000001, s2)
theta_hat_given_z = z1
theta2_hat_given_z = np.sqrt(np.pi/2) * max(0.00000001, z2)
return({'0priv': theta_hat_given_z, '1priv': theta2_hat_given_z, '0basic': theta_hat_given_s, '1basic': theta2_hat_given_s})
if Xdistribution == 'gamma':
K = theta_vector[1]
        s = 1/N * np.sum(X)
# -*- coding: utf-8 -*-
"""Test cases for the Square Exponential covariance function and its spatial gradient.
Testing is sparse at the moment. The C++ implementations are tested thoroughly (gpp_covariance_test.hpp/cpp) and
we rely more on :mod:`moe.tests.optimal_learning.python.cpp_wrappers.covariance_test`'s comparison
with C++ for verification of the Python code.
TODO(GH-175): Ping testing for spatial gradients and hyperparameter gradients/hessian.
TODO(GH-176): Make test structure general enough to support other covariance functions automatically.
"""
import numpy
import testify as T
from moe.optimal_learning.python.geometry_utils import ClosedInterval
from moe.optimal_learning.python.python_version.covariance import SquareExponential
from moe.optimal_learning.python.python_version.domain import TensorProductDomain
import moe.tests.optimal_learning.python.gaussian_process_test_utils as gp_utils
from moe.tests.optimal_learning.python.optimal_learning_test_case import OptimalLearningTestCase
class SquareExponentialTest(OptimalLearningTestCase):
"""Tests for the computation of the SquareExponential covariance and spatial gradient of covariance.
Tests cases are against manually verified results in various spatial dimensions and some ping tests.
"""
@T.class_setup
def base_setup(self):
"""Set up parameters for test cases."""
self.epsilon = 2.0 * numpy.finfo(numpy.float64).eps
self.CovarianceClass = SquareExponential
self.one_dim_test_sets = numpy.array([
[1.0, 0.1],
[2.0, 0.1],
[1.0, 1.0],
[0.1, 10.0],
[1.0, 1.0],
[0.1, 10.0],
])
self.three_dim_test_sets = numpy.array([
[1.0, 0.1, 0.1, 0.1],
[1.0, 0.1, 0.2, 0.1],
[1.0, 0.1, 0.2, 0.3],
[2.0, 0.1, 0.1, 0.1],
[2.0, 0.1, 0.2, 0.1],
[2.0, 0.1, 0.2, 0.3],
[0.1, 10.0, 1.0, 0.1],
[1.0, 10.0, 1.0, 0.1],
[10.0, 10.0, 1.0, 0.1],
[0.1, 10.0, 1.0, 0.1],
[1.0, 10.0, 1.0, 0.1],
[10.0, 10.0, 1.0, 0.1],
])
def test_square_exponential_covariance_one_dim(self):
"""Test the SquareExponential covariance function against correct values for different sets of hyperparameters in 1D."""
for hyperparameters in self.one_dim_test_sets:
signal_variance = hyperparameters[0]
length = hyperparameters[1]
covariance = self.CovarianceClass(hyperparameters)
# One length away
truth = signal_variance * numpy.exp(-0.5)
self.assert_scalar_within_relative(
covariance.covariance(numpy.array([0.0]), numpy.array(length)),
truth,
self.epsilon,
)
# Sym
self.assert_scalar_within_relative(
covariance.covariance(numpy.array(length), numpy.array([0.0])),
truth,
self.epsilon,
)
# One length * sqrt 2 away
truth = signal_variance * numpy.exp(-1.0)
self.assert_scalar_within_relative(
covariance.covariance(numpy.array([0.0]), numpy.array([length * numpy.sqrt(2)])),
truth,
self.epsilon,
)
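        # These expected values follow directly from the squared exponential
        # kernel k(x, x') = signal_variance * exp(-0.5 * sum_i (x_i - x'_i)^2 / length_i^2):
        # at a separation of one length scale the covariance drops to
        # signal_variance * exp(-0.5), and at sqrt(2) length scales to signal_variance * exp(-1.0).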
def test_square_exponential_covariance_three_dim(self):
"""Test the SquareExponential covariance function against correct values for different sets of hyperparameters in 3D."""
for hyperparameters in self.three_dim_test_sets:
signal_variance = hyperparameters[0]
length = hyperparameters[1:]
covariance = self.CovarianceClass(hyperparameters)
self.assert_scalar_within_relative(
covariance.covariance(numpy.array([0.0, 0.0, 0.0]), numpy.array([0.0, 0.0, length[2]])),
signal_variance * numpy.exp(-0.5),
self.epsilon,
)
self.assert_scalar_within_relative(
                covariance.covariance(numpy.array([0.0, 0.0, 0.0]), numpy.array([0.0, length[1], 0.0])),
                signal_variance * numpy.exp(-0.5),
                self.epsilon,
            )
import sys
sys.path.append("./models")
from sklearn.metrics import accuracy_score, f1_score, hamming_loss, jaccard_score
import torch
import numpy as np
from DataIter import BERTDataIter, get_labelbert_input_single_sen
import os
from SigmoidModel import SigmoidModel
from LabelMaskModel import LabelMaskModel
from TrainConfig import TrainConfig
from os.path import join
from scipy.special import expit
import random
import pandas as pd
import logging
logging.basicConfig(level=logging.INFO)
def _pred_logits_fc(model: torch.nn.Module, data_iter, device: torch.device, save_path):
"""
基于非标签掩码的模型预测
"""
model.eval()
y_true, logits = [], []
data_iter.reset()
with torch.no_grad():
for ipt in data_iter:
ipt = {k: v.to(device) for k, v in ipt.items()}
batch_label = ipt.pop("labels")
batch_logits = model(**ipt)[0]
batch_logits = batch_logits.to("cpu").data.numpy()
###
batch_label = batch_label.to("cpu").data.numpy()
y_true.append(batch_label)
logits.append(batch_logits)
    y_true = np.vstack(y_true)
from __future__ import print_function, division
from sklearn.pipeline import make_pipeline
from sklearn import preprocessing
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.model_selection import KFold, cross_validate
import numpy as np
import pickle
import os
# Training function
def train(x, y):
print("Training...")
clf = make_pipeline(preprocessing.StandardScaler(), LDA(solver='lsqr', shrinkage='auto'))
cv = KFold(n_splits=5)
#scores = cross_val_score(clf, x, y, cv=cv)
cv_results = cross_validate(clf, x, y, cv=cv,
scoring=('accuracy', 'roc_auc'),
return_train_score=True)
print("End training")
return clf, cv_results
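# Example (sketch, outside the OpenViBE box): the same pipeline can be
# cross-validated on synthetic data; make_classification is used here purely
# for illustration and is not part of the original scenario.
#
#   >>> from sklearn.datasets import make_classification
#   >>> x_demo, y_demo = make_classification(n_samples=200, n_features=8, random_state=0)
#   >>> clf_demo, cv_demo = train(x_demo, y_demo)
#   >>> cv_demo["test_accuracy"].mean(), cv_demo["test_roc_auc"].mean()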
class ClassifierTrainer(OVBox):
def __init__(self):
super(ClassifierTrainer, self).__init__()
self.stims = []
self.do_train = False
self.do_save = False
self.x = []
self.y = []
def initialize(self):
pass
# Save model after training
def save_model(self, model):
filename = self.setting['classifier_path']
pickle.dump(model, open(filename, 'wb'))
def process(self):
# collect stimulations, keep only target/non_targets ones and binarize them
if self.input[0]:
chunk = self.input[0].pop()
if type(chunk) == OVStimulationSet:
if (chunk):
stim = chunk.pop()
if stim.identifier == OpenViBE_stimulation['OVTK_StimulationId_Target'] or stim.identifier == OpenViBE_stimulation['OVTK_StimulationId_NonTarget']:
self.stims.append(stim.identifier - OpenViBE_stimulation['OVTK_StimulationId_Target'])
if stim.identifier == OpenViBE_stimulation['OVTK_StimulationId_ExperimentStop']:
self.do_train = True
# stack Target feature vectors
if self.input[1]:
x1 = self.input[1].pop()
if type(x1) == OVStreamedMatrixBuffer:
if (x1):
self.x.append(x1)
# stack Non Target features vectors
if self.input[2]:
x2 = self.input[2].pop()
if type(x2) == OVStreamedMatrixBuffer:
if (x2):
self.x.append(x2)
# Cross-validate after Experiments end
if self.do_train:
            self.y = np.array(self.stims)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
                elapsed = (i * 3) / 60
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
                elapsed = (i * 3) / 60
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
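# Typical usage (sketch; the date range and regex tag are illustrative, the
# dataset name matches the METBK entry defined in M2M_URLs below, and var_list
# is assumed to have been prepared by M2M_URLs):
#
#   >>> data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#   ...                 '2019-01-01T00:00:00.000Z', '2019-01-31T23:59:59.999Z')
#   >>> nclist = M2M_Files(data, '.*METBK.*\.nc$')
#   >>> variables, time_converted = M2M_Data(nclist, var_list)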
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
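# Indexing a structtype one element past its current length transparently
# appends a fresh var(), which is how M2M_URLs below can fill var_list[0],
# var_list[1], ... without pre-allocating the list.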
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = '%'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = '%'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = '%'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
        var_list[8].units = '%'
        var_list[9].units = 'W/m^2'
        var_list[10].units = 'W/m^2'
        var_list[11].units = 'mm'
        var_list[12].units = 'W/m^2'
        var_list[13].units = 'W/m^2'
        var_list[14].units = 'W/m^2'
        var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
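# PHSEN: seawater pH branches (NSIF and MFN telemetered streams) return the sensor
# thermistor temperature (degC) and pH (unitless).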
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
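# SPKIR: spectral irradiance branches return the downwelling irradiance vector
# (uW cm-2 nm-1) from the NSIF-mounted radiometers.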
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
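# PRESF: seafloor pressure branches (MFN tide-measurement streams) return absolute
# seafloor pressure (dbar) and seawater temperature (degC).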
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
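# CTDBP: moored CTD branches return temperature, practical salinity, density,
# pressure and conductivity.
# Example lookup (hypothetical caller; this chain is assumed to sit in a helper keyed
# on platform_name, node, instrument_class, method):
#   ('CE01ISSM', 'NSIF', 'CTD', 'Telemetered')
#     -> 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'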
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
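# VEL3D: MFN point-velocity branches return eastward/northward/upward turbulent
# velocity (m/s) and seawater pressure.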
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
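# VEL3D-K: wire-following profiler (CE09OSPM/WFP01) branches return the velocity
# components, heading/pitch/roll and the co-located CTD pressure; the profiler
# CTDPF branch follows directly below.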
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
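# PCO2A: buoy air-sea pCO2 branches return seawater and atmospheric pCO2 (uatm)
# and the derived CO2 flux (mol m-2 s-1).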
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
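# PARAD: profiler PAR branch returns photosynthetically active radiation
# (umol photons m-2 s-1) with the co-located CTD pressure.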
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
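# OPTAA: optical absorption/attenuation branches only populate the time base here;
# no science channels are mapped in this chain.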
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
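# NUTNR: SUNA nitrate branches return raw and salinity-corrected nitrate
# concentration (umol/L).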
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##
#MOPAK
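# MOPAK: buoy motion/accelerometer branches (recovered_host accel streams) only
# populate the time base.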
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
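# METBK1: bulk meteorology branches return sea-surface temperature/conductivity/
# salinity, magnetically corrected wind components, barometric pressure, air
# temperature, relative humidity, long/shortwave irradiance, precipitation, the
# minute heat-flux products, eastward/northward velocity and specific humidity.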
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W m-2'
var_list[10].units = 'W m-2'
var_list[11].units = 'mm'
var_list[12].units = 'W m-2'
var_list[13].units = 'W m-2'
var_list[14].units = 'W m-2'
var_list[15].units = 'W m-2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
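# FLORT: fluorometer branches return the seawater scattering coefficient,
# chlorophyll-a, CDOM, total volume scattering and optical backscatter.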
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
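# FDCHP: direct-covariance flux branch only populates the time base here.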
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
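# DOSTA: dissolved-oxygen optode branches; NSIF streams add estimated concentration,
# optode temperature and TC oxygen, while MFN streams pair dissolved_oxygen with the
# optode oxygen channel.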
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
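# acoustic Doppler current profiler: earth-referenced velocity profiles (east/north/up) with bin depths and heading/pitch/roll from the adcp_velocity_earth streams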
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
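# surface wave spectra/statistics: bulk wave parameters (significant height, mean/peak periods, direction, spread) from the wavss_a_dcl_statistics_recovered streams on the surface buoys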
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
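# single-point velocity meter: point velocities (east/north/up) plus instrument heading/pitch/roll, temperature, and pressure from the velpt_ab streams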
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
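# note: this branch requests the velpt_ab_instrument_recovered stream; the dcl-parsed stream above is kept commented out for reference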
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
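# water pCO2 sensor: thermistor temperature and seawater pCO2 from the pco2w_abc_dcl_instrument_recovered streams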
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
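# seawater pH sensor: thermistor temperature and pH of seawater (unitless) from the phsen_abcdef_dcl_instrument_recovered streams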
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
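# spectral irradiance: downwelling irradiance vector (uW cm-2 nm-1) from the spkir_abj_dcl_instrument_recovered streams on the NSIF nodes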
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
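# seafloor pressure: absolute seafloor pressure and seawater temperature from the presf_abc_dcl_tide_measurement_recovered streams on the MFN nodes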
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
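# pumped CTD (selected via instrument_class == 'CTD'): temperature, practical salinity, density, pressure, and conductivity from the ctdbp_cdef_dcl_instrument_recovered streams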
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
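# 3-D single-point velocity meter: turbulent velocity components (east/north/up) and pressure from the vel3d_cd_dcl_velocity_data_recovered streams on the MFN nodes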
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
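# air-sea pCO2: seawater and atmospheric pCO2 plus the computed CO2 flux from the pco2a_a_dcl_instrument_water_recovered streams on the surface buoys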
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
        var_list[2].data = np.array([])
# -*- coding: utf-8 -*-
# @Time    : 2021/2/10 11:24 AM
# @Author : <NAME>
# @fileName: SaDE.py
# @Software: PyCharm
# @Blog :https://lesliewongcv.github.io/
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
import DifferenceEvolution as DE
import GenericAlgorithm as GA
NP = 50
D = 10
CRm = np.random.normal(0.5, 0.1)
X = np.arange(-5, 5, 0.1)
Y = X ** 2
ITER = 100
rep_set = np.zeros(4)
LP = 2
# ITER = 3
def rand_1_mutation(popu_):
offspringSet = np.zeros((NP, D))
for j in range(NP):
f = np.random.normal(0.5, 0.3)
sub_popu = np.delete(popu_, j, axis=0)
rand_sub = np.arange(sub_popu.shape[0])
np.random.shuffle(rand_sub)
offspringSet[j] = sub_popu[rand_sub[4]] + f * (sub_popu[rand_sub[0]] - sub_popu[rand_sub[1]])
return offspringSet
def rand2best_2_mutation(popu_):
offspringSet = np.zeros((NP, D))
for k in range(NP):
f = np.random.normal(0.5, 0.3)
sub_popu = np.delete(popu_, k, axis=0)
rand_sub = np.arange(sub_popu.shape[0])
np.random.shuffle(rand_sub)
offspringSet[k] = sub_popu[rand_sub[4]] + f * (sub_popu[rand_sub[0]] - sub_popu[rand_sub[1]]) \
+ f * (sub_popu[rand_sub[2]] - sub_popu[rand_sub[3]])
return offspringSet
def cur2rand_1_mutation(popu_):
offspringSet = np.zeros((NP, D))
for l in range(NP):
        f = np.random.normal(0.5, 0.3)
        sub_popu = np.delete(popu_, l, axis=0)
        rand_sub = np.arange(sub_popu.shape[0])
        np.random.shuffle(rand_sub)
        # DE/current-to-rand/1 donor (assumed formulation): x_l + f*(x_r1 - x_l) + f*(x_r2 - x_r3)
        offspringSet[l] = popu_[l] + f * (sub_popu[rand_sub[0]] - popu_[l]) + \
            f * (sub_popu[rand_sub[1]] - sub_popu[rand_sub[2]])
    return offspringSet
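# Illustrative usage sketch (not part of the original script): generating donor vectors
# from a randomly initialised population with each mutation strategy defined above.
#
# popu = np.random.uniform(-5, 5, (NP, D))
# donors_a = rand_1_mutation(popu)        # DE/rand/1
# donors_b = rand2best_2_mutation(popu)   # two-difference-vector variant
# donors_c = cur2rand_1_mutation(popu)    # DE/current-to-rand/1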
import unittest
import numpy.testing as npt
import numpy as np
import os
from conjgrad import cg_solve
DEFAULT_TOL = 2e-2
DEFAULT_ATOL = 2e-2
def make_sparse_inputs(A, b):
N = len(b)
spA = np.zeros((N, 18))
C = np.zeros(18 * N, dtype=int)
for i in range(N):
filt = A[i, :] != 0
Ai = A[i, filt]
conn = np.where(filt)[0]
spA[i, :len(Ai)] = Ai
C[18 * i:18 * i + len(conn)] = conn
return spA, C
class LASTest(unittest.TestCase):
def test_cg_eye_rand(self):
A = np.eye(10)
b = np.random.rand(10)
res = cg_solve(A, b)
        sol = np.linalg.solve(A, b)
        # the CG result should match the dense solve within the module tolerances (assumed check)
        npt.assert_allclose(res, sol, rtol=DEFAULT_TOL, atol=DEFAULT_ATOL)
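    # Illustrative additional check (sketch, not in the original suite): CG on a random
    # symmetric positive-definite system, compared against numpy.linalg.solve.
    #
    # def test_cg_random_spd(self):
    #     M = np.random.rand(10, 10)
    #     A = M @ M.T + 10 * np.eye(10)   # diagonally-loaded SPD matrix
    #     b = np.random.rand(10)
    #     npt.assert_allclose(cg_solve(A, b), np.linalg.solve(A, b),
    #                         rtol=DEFAULT_TOL, atol=DEFAULT_ATOL)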
import collections
import abc
import colorama
import numpy as np
import scipy
import sklearn.mixture
import scipy.stats
import state
import mposterior
# a named tuple for a more intuitive access to a "exchange tuple"
ExchangeTuple = collections.namedtuple(
'ExchangeTuple', ['i_PE', 'i_particle_within_PE', 'i_neighbour', 'i_particle_within_neighbour'])
# a named tuple for a more intuitive access to a "exchange tuple"
NeighbourParticlesTuple = collections.namedtuple(
'NeighbourParticlesTuple', ['i_neighbour', 'i_particles'])
class ExchangeRecipe(metaclass=abc.ABCMeta):
def __init__(self, processing_elements_topology):
self._PEs_topology = processing_elements_topology
# for the sake of convenience, we keep the number of PEs...
self._n_PEs = processing_elements_topology.n_processing_elements
def randomized_wakeup(self, n, PRNG):
# time elapsed between ticks of the PEs' clocks (each row is the tick of the corresponding PE)
time_elapsed_between_ticks = PRNG.exponential(size=(self._n_PEs, n))
# waking times for every PE (as many as the number of iterations so that, in principle, any PE can *always* be
# the chosen one)
ticks_absolute_time = time_elapsed_between_ticks.cumsum(axis=1)
# these are the indexes of the PEs that will wake up to exchange statistics with a neighbour (notice that a
# certain PE can show up several times)
# REMARK I: [0] is because we only care about the index of the waking PE (and not the instant)
# REMARK II: [:self.n_iterations] is because we only consider the "self.n_iterations" earliest wakings
i_waking_PEs = np.unravel_index(np.argsort(ticks_absolute_time, axis=None), (self._n_PEs, n))[0][:n]
return i_waking_PEs
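    # Example (illustrative): with 3 PEs and n=4, `ticks_absolute_time` holds each PE's
    # cumulative exponential tick times; sorting all ticks jointly and keeping the 4
    # earliest gives the waking order, e.g. i_waking_PEs == array([2, 0, 2, 1]),
    # so the same PE may wake up more than once.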
def perform_exchange(self, DPF):
pass
def messages(self):
return np.NaN
@property
def n_processing_elements(self):
return self._n_PEs
@property
def PEs_topology(self):
return self._PEs_topology
# a decorator
class IteratedExchangeRecipe(ExchangeRecipe):
def __init__(self, exchange_recipe, n_iterations):
self._exchange_recipe = exchange_recipe
self._n_iterations = n_iterations
def messages(self):
return self._exchange_recipe.messages()*self._n_iterations
def perform_exchange(self, DPF):
for _ in range(self._n_iterations):
self._exchange_recipe.perform_exchange(DPF)
@property
def n_processing_elements(self):
return self._exchange_recipe.n_processing_elements
class ParticlesBasedExchangeRecipe(ExchangeRecipe):
def __init__(
self, processing_elements_topology, n_particles_per_processing_element, exchanged_particles):
super().__init__(processing_elements_topology)
# the "contacts" of each PE are the PEs it is going to exchange/share particles with
self.processing_elements_contacts = self.get_PEs_contacts()
# the number of particles that are to be exchanged between a couple of neighbours is computed (or set)
if type(exchanged_particles) is int:
self.n_particles_exchanged_between_neighbours = exchanged_particles
elif type(exchanged_particles) is float:
# it is computed accounting for the maximum number of neighbours a given PE can have
self.n_particles_exchanged_between_neighbours = int(
(n_particles_per_processing_element * exchanged_particles) // max(
[len(neighbourhood) for neighbourhood in self.processing_elements_contacts]))
else:
raise Exception('type of "exchanged_particles" is not valid')
        if self.n_particles_exchanged_between_neighbours == 0:
raise Exception('no particles are to be shared by a PE with its processing_elements_contacts')
def perform_exchange(self, DPF):
        # first, we gather all the particles that are going to be exchanged in an auxiliary variable
aux = []
for exchangeTuple in self.exchange_tuples:
aux.append(
(DPF.PEs[exchangeTuple.i_PE].get_particle(exchangeTuple.i_particle_within_PE),
DPF.PEs[exchangeTuple.i_neighbour].get_particle(exchangeTuple.i_particle_within_neighbour)))
# afterwards, we loop through all the exchange tuples performing the real exchange
for (exchangeTuple, particles) in zip(self.exchange_tuples, aux):
DPF.PEs[exchangeTuple.i_PE].set_particle(exchangeTuple.i_particle_within_PE, particles[1])
DPF.PEs[exchangeTuple.i_neighbour].set_particle(exchangeTuple.i_particle_within_neighbour, particles[0])
def messages(self):
# the number of hops between each pair of PEs
distances = self._PEs_topology.distances_between_processing_elements
# overall number of messages sent/received in an exchange step
n_messages = 0
# for every PE (index) along with its list of neighbours
for i_processing_element, neighbours_list in enumerate(self.neighbours_particles):
# each element of the list is a tuple (<index neighbour>,<indexes of the particles exchanged with that neighbour>)
for i_neighbour, i_particles in neighbours_list:
# the number of messages required to send the samples
n_messages += distances[i_processing_element, i_neighbour]*len(i_particles)*state.n_elements
# we also need to send the aggregated weight to each neighbour
n_messages += len(neighbours_list)
return n_messages
@abc.abstractmethod
def get_PEs_contacts(self):
pass
@abc.abstractproperty
def exchange_tuples(self):
pass
@abc.abstractproperty
def neighbours_particles(self):
pass
class DRNAExchangeRecipe(ParticlesBasedExchangeRecipe):
def __init__(
self, processing_elements_topology, n_particles_per_processing_element, exchanged_particles,
PRNG=np.random.RandomState(), allow_exchange_one_particle_more_than_once=False):
super().__init__(processing_elements_topology, n_particles_per_processing_element, exchanged_particles)
# indexes of the particles...just for the sake of efficiency (this array will be used many times)
i_particles = np.arange(n_particles_per_processing_element)
# an array to keep tabs on pairs of PEs already processed
already_processed_PEs = np.zeros((self._n_PEs, self._n_PEs), dtype=bool)
# in order to keep tabs on which particles a given PE has already "promised" to exchange
particles_not_swapped_yet = np.ones((self._n_PEs, n_particles_per_processing_element), dtype=bool)
# all the elements in this array will be "true" across all the iterations of the loop below
candidate_particles_all_true = particles_not_swapped_yet.copy()
if allow_exchange_one_particle_more_than_once:
# the "reference" below is set to a fixed all-true array
candidate_particles = candidate_particles_all_true
else:
# the "reference" is set to the (varying) "particles_not_swapped_yet"
candidate_particles = particles_not_swapped_yet
# named tuples as defined above, each representing an exchange
self._exchangeTuples = []
# a list in which the i-th element is also a list containing tuples of the form (<neighbour index>,<numpy array>
# with the indices of particles to be exchanged with that neighbour>)
self._neighbours_particles = [[] for _ in range(self._n_PEs)]
for iPE, i_this_PE_neighbours in enumerate(self.processing_elements_contacts):
for iNeighbour in i_this_PE_neighbours:
if not already_processed_PEs[iPE, iNeighbour]:
# the particles to be exchanged are chosen randomly (with no replacement) for both, this PE...
i_exchanged_particles_within_PE = PRNG.choice(
i_particles[candidate_particles[iPE, :]],
size=self.n_particles_exchanged_between_neighbours, replace=False)
# ...and the corresponding neighbour
i_exchanged_particles_within_neighbour = PRNG.choice(
i_particles[candidate_particles[iNeighbour, :]],
size=self.n_particles_exchanged_between_neighbours, replace=False)
# new "exchange tuple"s are generated
self._exchangeTuples.extend([ExchangeTuple(
i_PE=iPE, i_particle_within_PE=iParticleWithinPE, i_neighbour=iNeighbour,
i_particle_within_neighbour=iParticleWithinNeighbour
) for iParticleWithinPE, iParticleWithinNeighbour in zip(
i_exchanged_particles_within_PE, i_exchanged_particles_within_neighbour)])
# these PEs (the one considered in the main loop and the neighbour being processed) should not
# exchange the selected particles (different in each case) with other PEs
particles_not_swapped_yet[iPE, i_exchanged_particles_within_PE] = False
particles_not_swapped_yet[iNeighbour, i_exchanged_particles_within_neighbour] = False
# we "mark" this pair of PEs as already processed; despite the symmetry,
# only "already_processed_PEs[iNeighbour, iPe]" should be accessed later on
already_processed_PEs[iNeighbour, iPE] = already_processed_PEs[iPE, iNeighbour] = True
# each tuple specifies a neighbor, and the particles THE LATTER exchanges with it (rather than
# the other way around)
self._neighbours_particles[iPE].append(
NeighbourParticlesTuple(iNeighbour, i_exchanged_particles_within_neighbour))
self._neighbours_particles[iNeighbour].append(
NeighbourParticlesTuple(iPE, i_exchanged_particles_within_PE))
@property
def exchange_tuples(self):
return self._exchangeTuples
# this is only meant to be used by subclasses (specifically, Mposterior-related ones)
@property
def neighbours_particles(self):
"""Particles received from each neighbour.
Returns
-------
neighbours, particles : list of lists
Every individual list contains tuples of the form (<index neighbour>, <indexes particles within
that neighbour>) for the corresponding PE
"""
return self._neighbours_particles
def get_PEs_contacts(self):
return self._PEs_topology.get_neighbours()
class MposteriorExchangeRecipe(DRNAExchangeRecipe):
def __init__(
self, processing_elements_topology, n_particles_per_processing_element, exchanged_particles,
weiszfeld_parameters, PRNG=np.random.RandomState(), allow_exchange_one_particle_more_than_once=False):
super().__init__(processing_elements_topology, n_particles_per_processing_element, exchanged_particles,
PRNG, allow_exchange_one_particle_more_than_once)
self.weiszfeld_parameters = weiszfeld_parameters
self.i_own_particles_within_PEs = [PRNG.randint(
n_particles_per_processing_element, size=self.n_particles_exchanged_between_neighbours
) for _ in range(self._n_PEs)]
def perform_exchange(self, DPF):
for PE, this_PE_neighbours_particles, i_this_PE_particles in zip(
DPF.PEs, self.neighbours_particles, self.i_own_particles_within_PEs):
# a list with the subset posterior of each neighbour
subset_posterior_distributions = [
DPF.PEs[neighbour_particles.i_neighbour].get_samples_at(neighbour_particles.i_particles).T
for neighbour_particles in this_PE_neighbours_particles]
# a subset posterior obtained from this PE is also added: it encompasses
# the particles whose indexes are given in "i_this_PE_particles"
subset_posterior_distributions.append(PE.get_samples_at(i_this_PE_particles).T)
joint_particles, joint_weights = mposterior.find_weiszfeld_median(
subset_posterior_distributions, **self.weiszfeld_parameters)
# the indexes of the particles to be kept
i_new_particles = DPF._resampling_algorithm.get_indexes(joint_weights, PE.n_particles)
PE.samples = joint_particles[:, i_new_particles]
            PE.log_weights = np.full(PE.n_particles, -np.log(PE.n_particles))
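# Illustrative usage sketch (assumed surrounding API, not part of this module): one DRNA
# exchange step for a distributed particle filter `dpf` over a given PEs topology object
# exposing n_processing_elements and get_neighbours().
#
#   recipe = DRNAExchangeRecipe(topology, n_particles_per_processing_element=128,
#                               exchanged_particles=0.1, PRNG=np.random.RandomState(0))
#   recipe.perform_exchange(dpf)    # swap the pre-selected particles between neighbouring PEs
#   n_messages = recipe.messages()  # communication cost of one exchange step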
"""Tests for command serialization."""
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
from PIL import Image
import pytest
import scenepic as sp
def test_color():
expected = sp.Color(0.5, 0, 0.2)
actual = sp.ColorFromBytes(255, 0, 102) * 0.5
np.testing.assert_array_almost_equal(actual, expected)
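# Note: ColorFromBytes maps byte values to floats by dividing by 255, so (255, 0, 102)
# becomes (1.0, 0.0, 0.4); scaling by 0.5 gives (0.5, 0.0, 0.2), matching `expected`.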
def test_camera(assert_json_equal):
center = np.array([0, 2, 0], np.float32)
look_at = np.array([0, 1, 0], np.float32)
up_dir = np.array([1, 0, 0], np.float32)
fov_y_degrees = 45.0
initial_aspect_ratio = 1.5
new_aspect_ratio = 0.9
znear = 0.01
zfar = 20
rotation = np.array([
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]
], np.float32)
world_to_camera = np.array([
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 1, 0, -2],
[0, 0, 0, 1]
], np.float32)
camera_to_world = np.array([
[0, 1, 0, 0],
[0, 0, 1, 2],
[1, 0, 0, 0],
[0, 0, 0, 1]
], np.float32)
projection = sp.Transforms.gl_projection(fov_y_degrees, initial_aspect_ratio, znear, zfar)
look_at_cam = sp.Camera(center, look_at, up_dir, fov_y_degrees, znear, zfar, initial_aspect_ratio)
np.testing.assert_array_almost_equal(look_at_cam.center, center)
np.testing.assert_array_almost_equal(look_at_cam.look_at, look_at)
np.testing.assert_array_almost_equal(look_at_cam.up_dir, up_dir)
np.testing.assert_array_almost_equal(look_at_cam.world_to_camera, world_to_camera)
np.testing.assert_array_almost_equal(look_at_cam.camera_to_world, camera_to_world)
np.testing.assert_array_almost_equal(look_at_cam.projection, projection)
assert_json_equal(str(look_at_cam), "camera")
look_at_cam.aspect_ratio = new_aspect_ratio
assert look_at_cam.aspect_ratio == pytest.approx(new_aspect_ratio)
rt_cam = sp.Camera(center, rotation, fov_y_degrees, znear, zfar, new_aspect_ratio)
np.testing.assert_array_almost_equal(rt_cam.center, center)
np.testing.assert_array_almost_equal(rt_cam.look_at, look_at)
np.testing.assert_array_almost_equal(rt_cam.up_dir, up_dir)
np.testing.assert_array_almost_equal(rt_cam.world_to_camera, world_to_camera)
np.testing.assert_array_almost_equal(rt_cam.camera_to_world, camera_to_world)
assert rt_cam.aspect_ratio == pytest.approx(new_aspect_ratio)
rt_cam.aspect_ratio = initial_aspect_ratio
np.testing.assert_array_almost_equal(rt_cam.projection, projection)
assert_json_equal(str(rt_cam), "camera")
cam = sp.Camera(world_to_camera, fov_y_degrees, znear, zfar, new_aspect_ratio)
np.testing.assert_array_almost_equal(cam.center, center)
np.testing.assert_array_almost_equal(cam.look_at, look_at)
np.testing.assert_array_almost_equal(cam.up_dir, up_dir)
np.testing.assert_array_almost_equal(cam.world_to_camera, world_to_camera)
np.testing.assert_array_almost_equal(cam.camera_to_world, camera_to_world)
assert cam.aspect_ratio == pytest.approx(new_aspect_ratio)
cam.aspect_ratio = initial_aspect_ratio
np.testing.assert_array_almost_equal(cam.projection, projection)
assert_json_equal(str(cam), "camera")
def test_canvas2d(assert_json_equal):
scene = sp.Scene()
canvas2d = scene.create_canvas_2d("canvas2d")
frame2d = canvas2d.create_frame()
frame2d.add_circle(0, 0, 5)
frame2d = canvas2d.create_frame()
frame2d.add_rectangle(5, 6, 7, 8)
frame2d = canvas2d.create_frame()
frame2d.add_text("test", 1, 1)
frame2d = canvas2d.create_frame()
frame2d.add_image("rand")
assert_json_equal(str(canvas2d), "canvas2d")
canvas2d.clear_script()
frame2d = canvas2d.create_frame()
positions = np.array(
[[0, 0],
[1, 1],
[2, 2]], np.float32
)
frame2d.add_line(positions)
assert_json_equal(str(canvas2d), "canvas2d_cleared")
def test_canvas3d(assert_json_equal, color):
scene = sp.Scene()
cube_mesh = scene.create_mesh("cube")
cube_mesh.add_cube(color)
cone_mesh = scene.create_mesh("cone")
cone_mesh.add_cone(color)
disc_mesh = scene.create_mesh("disc")
disc_mesh.add_disc(color)
icosphere_mesh = scene.create_mesh("icosphere")
icosphere_mesh.add_icosphere(color)
cylinder_mesh = scene.create_mesh("cylinder")
cylinder_mesh.add_cylinder(color)
canvas3d = scene.create_canvas_3d("canvas3d")
frame3d = canvas3d.create_frame("", [1, 0, 0])
frame3d.add_mesh(cube_mesh)
frame3d = canvas3d.create_frame()
frame3d.add_mesh(disc_mesh, sp.Transforms.Scale(5))
frame3d = canvas3d.create_frame()
frame3d.add_mesh(icosphere_mesh)
frame3d = canvas3d.create_frame()
frame3d.add_mesh(cylinder_mesh)
assert_json_equal(str(canvas3d), "canvas3d")
canvas3d.clear_script()
frame3d = canvas3d.create_frame()
frame3d.add_mesh(cone_mesh)
assert_json_equal(str(canvas3d), "canvas3d_cleared")
def test_drop_down_menu(assert_json_equal):
scene = sp.Scene()
drop_down_menu = scene.create_drop_down_menu("", "DropDown")
drop_down_menu.items = ["one", "two", "three"]
assert_json_equal(str(drop_down_menu), "drop_down_menu")
def test_frame2d(assert_json_equal):
scene = sp.Scene()
canvas2d = scene.create_canvas_2d()
frame2d = canvas2d.create_frame()
frame2d.add_circle(0, 0, 5.0)
assert_json_equal(str(frame2d), "frame2d")
def test_frame3d(assert_json_equal, color):
scene = sp.Scene()
cube_mesh = scene.create_mesh("cube")
cube_mesh.add_cube(color)
canvas3d = scene.create_canvas_3d()
frame3d = canvas3d.create_frame("", [1, 0, 0])
frame3d.add_mesh(cube_mesh)
assert_json_equal(str(frame3d), "frame3d")
def test_image(assert_json_equal, color, asset):
scene = sp.Scene()
image = scene.create_image("rand")
image.load(asset("rand.png"))
assert_json_equal(str(image), "image")
pixels = np.array(Image.open(asset("rand.png")))
image.from_numpy(pixels, "png")
assert_json_equal(str(image), "image")
with open(asset("rand.png"), "rb") as file:
buffer = file.read()
image.load_from_buffer(buffer, "png")
assert_json_equal(str(image), "image")
mesh = scene.create_mesh("image")
mesh.texture_id = image.image_id
mesh.add_image()
assert_json_equal(str(mesh), "image_mesh")
def test_audio(assert_json_equal, asset):
scene = sp.Scene()
audio = scene.create_audio("hello")
audio.load(asset("hello.mp3"))
assert_json_equal(str(audio), "audio")
def test_video(assert_json_equal, asset):
scene = sp.Scene()
video = scene.create_video("test")
video.load(asset("test.mp4"))
assert_json_equal(str(video), "video")
def test_label(assert_json_equal):
scene = sp.Scene()
scene.create_label()
assert_json_equal(scene.get_json(), "label")
def test_primitives(assert_json_equal, asset, color):
scene = sp.Scene()
texture = scene.create_image("uv")
texture.load(asset("uv.png"))
mesh = scene.create_mesh("triangle")
mesh.add_triangle(color)
assert_json_equal(str(mesh), "triangle")
mesh = scene.create_mesh("quad")
mesh.add_quad(color)
assert_json_equal(str(mesh), "quad")
mesh = scene.create_mesh("cube")
mesh.add_cube(color)
assert_json_equal(str(mesh), "cube")
mesh = scene.create_mesh("cube_texture", texture_id=texture.image_id)
mesh.add_cube()
assert_json_equal(str(mesh), "cube_texture")
mesh = scene.create_mesh("thickline")
mesh.add_thickline(color)
assert_json_equal(str(mesh), "thickline")
mesh = scene.create_mesh("cone")
mesh.add_cone(color)
assert_json_equal(str(mesh), "cone")
mesh = scene.create_mesh("trunc_cone")
mesh.add_cone(color, truncation_height=0.7)
assert_json_equal(str(mesh), "trunc_cone")
mesh = scene.create_mesh("coordinate_axes_0")
mesh.add_coordinate_axes()
assert_json_equal(str(mesh), "coordinate_axes_0")
mesh = scene.create_mesh("coordinate_axes_1", shared_color=sp.Colors.White)
mesh.add_cube()
mesh.add_coordinate_axes()
assert_json_equal(str(mesh), "coordinate_axes_1")
mesh = scene.create_mesh("camera_frustum")
mesh.add_camera_frustum(color)
assert_json_equal(str(mesh), "camera_frustum")
mesh = scene.create_mesh("disc")
mesh.add_disc(color)
assert_json_equal(str(mesh), "disc")
mesh = scene.create_mesh("cylinder")
mesh.add_cylinder(color)
assert_json_equal(str(mesh), "cylinder")
mesh = scene.create_mesh("sphere")
mesh.add_sphere(color)
assert_json_equal(str(mesh), "sphere")
mesh = scene.create_mesh("icosphere")
mesh.add_icosphere(color)
assert_json_equal(str(mesh), "icosphere")
mesh = scene.create_mesh("icosphere_texture", texture_id=texture.image_id)
mesh.add_icosphere(steps=1)
assert_json_equal(str(mesh), "icosphere_texture")
mesh = scene.create_mesh("uv_sphere")
mesh.add_uv_sphere(color)
assert_json_equal(str(mesh), "uv_sphere")
mesh = scene.create_mesh("point_cloud")
positions = []
for x in range(5):
for y in range(5):
for z in range(5):
positions.append([x, y, z])
positions = np.array(positions, np.float32)
positions = (positions / 2) - 1
mesh.add_cube(color)
mesh.apply_transform(sp.Transforms.scale(0.01))
mesh.enable_instancing(positions)
assert_json_equal(str(mesh), "point_cloud")
mesh = scene.create_mesh("line_cloud")
mesh.add_lines(positions, positions * 10, color)
assert_json_equal(str(mesh), "line_cloud")
def test_io(assert_json_equal, asset):
scene = sp.Scene()
image = scene.create_image("texture")
image.load(asset("PolarBear.png"))
mesh = scene.create_mesh("cube")
mesh.texture_id = image.image_id
mesh_info = sp.load_obj(asset("cube.obj"))
mesh.add_mesh(mesh_info)
assert_json_equal(str(mesh), "io")
def test_mesh_update(assert_json_equal, color):
scene = sp.Scene()
mesh = scene.create_mesh("base")
mesh.add_triangle(color)
positions = np.array([
[0, 0, 0],
[1, 0, 0],
[0, 0, 1]
], np.float32)
normals = np.array([
[0, -1, 0],
[0, -1, 0],
[0, -1, 0]
], np.float32)
colors = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
], np.float32)
update = scene.update_mesh_positions("base", positions, "update0")
assert_json_equal(str(update), "update0")
update = scene.update_mesh("base", positions, normals, colors, "update1")
assert_json_equal(str(update), "update1")
keyframe_buffer = update.vertex_buffer.copy()
keyframe_buffer["pos"][0] = [0, 1, 1]
update.quantize(1, 6.0, keyframe_buffer)
assert_json_equal(str(update), "update_quantized")
instance_pos = np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2]
], np.float32)
instance_rot = np.array([
[0.11, 0.22, 0.46, 0.85],
[0.46, -0.12, -0.22, 0.85],
[0.22, -0.12, 0.46, 0.85]
], np.float32)
instance_c = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
], np.float32)
mesh.enable_instancing(instance_pos, instance_rot, instance_c)
instance_pos[0] = [1, 1, 0]
update = scene.update_mesh_positions("base", instance_pos, "update2")
assert_json_equal(str(update), "update2")
instance_pos[1] = [1, 0, 1]
instance_rot[0] = [0.24, 0.24, 0.06, 0.94]
instance_c[0] = [0.5, 0, 0]
update = scene.update_instanced_mesh("base", instance_pos, instance_rot, instance_c, "update3")
assert_json_equal(str(update), "update3")
def test_quantization(assert_json_equal, color):
scene = sp.Scene()
mesh = scene.create_mesh("base")
mesh.add_triangle(color)
for i in range(20):
positions = np.array([
[0, 0, 0],
[1, i * 0.05, 0],
[0, 1, 0]
], np.float32)
scene.update_mesh_positions("base", positions)
info = scene.quantize_updates(1e-5)
assert info["base"].keyframe_count == 2
assert_json_equal(scene.get_json(), "quantization")
def _create_tetrahedron():
vertices = np.array([
[-0.5, -0.32476, -0.20412],
[0.5, -0.32476, -0.20412],
[0, 0.541266, -0.20412],
[0, 0.108253, 0.612372]
], np.float32)
triangles = np.array([
[0, 1, 3],
[1, 2, 3],
[2, 0, 3],
[0, 2, 1]
], np.uint32)
return vertices, triangles
SIZE = 500
def test_scene(assert_json_equal, asset):
scene = sp.Scene("test")
image = scene.create_image("rand")
image.load(asset("rand.png"))
mesh_rand = scene.create_mesh()
mesh_rand.texture_id = image.image_id
mesh_rand.layer_id = "Test"
mesh_rand.add_image([-0.5, -0.5, 0], [2, 0, 0])
tet_verts, tet_tris = _create_tetrahedron()
model_mesh = scene.create_mesh()
model_mesh.shared_color = sp.Color(1, 0, 0)
model_mesh.add_mesh_without_normals(tet_verts, tet_tris)
model_mesh.reverse_triangle_order()
canvas_rand = scene.create_canvas_3d("", SIZE, SIZE)
tet_center = tet_verts.mean(axis=0)
canvas_tet = scene.create_canvas_3d("", SIZE, SIZE)
canvas_tet.camera = sp.Camera(tet_center + np.array([0, 0, 0.5], np.float32),
tet_center, [0, 1, 0], 45.0)
canvas_tet.shading = sp.Shading(sp.Colors.White)
canvas_tet.ui_parameters = sp.UIParameters()
n_frames = 5
for i in range(n_frames):
angle = 2 * np.pi * i / n_frames
frame_rand = canvas_rand.create_frame()
frame_rand.add_mesh(mesh_rand, sp.Transforms.Translate([np.cos(angle), np.sin(angle), 0]))
mesh_primitives = scene.create_mesh()
mesh_primitives.layer_id = "Primitives"
mesh_primitives.add_disc(sp.Color(0, 1, 0),
sp.Transforms.Scale(0.2 + 0.2 * (1 + np.cos(angle))),
10, False, True)
mesh_primitives.add_cube(sp.Color(0, 0, 1), sp.Transforms.Translate([-1, -1, -3]))
frame_rand.add_mesh(mesh_primitives)
mesh_noise = scene.create_mesh()
mesh_noise.shared_color = sp.Color(1, 0, 0)
mesh_noise.layer_id = "Noise"
mesh_noise.add_cylinder()
mesh_noise.apply_transform(sp.Transforms.Scale([0.02, 0.1, 0.1]))
mesh_noise.apply_transform(sp.Transforms.RotationToRotateXAxisToAlignWithAxis([0.5, 0.5, 0.5]))
positions = np.zeros((16, 3), np.float32)  # api: numpy.zeros
import numpy as np
class Target:
def __init__(self,
init_weight=1.0,
init_state=np.array([[0.0], [0.0], [0.0], [0.0]]),
init_cov=np.diag((0.01, 0.01, 0.01, 0.01)),
process_noise=0.001,
step=3,
dt_1=1,
dt_2=1):
self.state = init_state
self.state_cov = init_cov
self.weight = init_weight
self.measure_cov = init_cov
self.dt_1 = dt_1
self.dt_2 = dt_2
self.all_states = []
self.all_states.append(init_state)
self.all_cov = []
self.all_cov.append(init_cov)
self.state[2][0] = step
self.state[3][0] = step
self.A = np.array([[1, 0, dt_1, 0],
[0, 1, 0, dt_2],
[0, 0, 1, 0],
[0, 0, 0, 1]])
self.B = np.eye(init_state.shape[0])  # api: numpy.eye
#!/usr/bin/env python3
import os, time, json
import numpy as np
import pandas as pd
from pprint import pprint
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from matplotlib.colors import LogNorm
from scipy.integrate import quad
import tinydb as db
import argparse
from matplotlib.lines import Line2D
c_f = 1e-12
data = np.genfromtxt("./V_HV.txt") #test1
data1 = np.genfromtxt('./V_HV2.txt') #test4
data2 = np.genfromtxt('./V_HV3.txt') #test5
v_hv = np.asarray(data[1:,0])
v_out = np.asarray(data[1:,2])  # api: numpy.asarray
import sys
sys.path.append('../')
from collections import deque
import os
from pathlib import Path
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from matplotlib.backend_bases import MouseButton
import pickle
from re import split
from scipy.cluster.vq import vq, kmeans2
from py_diff_pd.common.project_path import root_path
from py_diff_pd.common.common import ndarray, create_folder, print_info, print_warning
def extract_intrinsic_parameters(K):
K = ndarray(K).copy()
cx = K[0, 2]
cy = K[1, 2]
alpha = K[0, 0]
cot_theta = K[0, 1] / -alpha
tan_theta = 1 / cot_theta
theta = np.arctan(tan_theta)
if theta < 0:
theta += np.pi
beta = K[1, 1] * np.sin(theta)
return { 'alpha': alpha, 'beta': beta, 'theta': theta, 'cx': cx, 'cy': cy }
def assemble_intrinsic_parameters(alpha, beta, theta, cx, cy):
K = np.zeros((3, 3))
K[0, 0] = alpha
K[0, 1] = -alpha / np.tan(theta)
K[0, 2] = cx
K[1, 1] = beta / np.sin(theta)
K[1, 2] = cy
K[2, 2] = 1
return ndarray(K).copy()
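# Illustrative check, not part of the original script: the two helpers above are
# inverses of each other for theta in (0, pi). The numbers below are arbitrary.
def _check_intrinsics_roundtrip():
    params = {'alpha': 800.0, 'beta': 780.0, 'theta': np.pi / 3, 'cx': 320.0, 'cy': 240.0}
    K = assemble_intrinsic_parameters(**params)
    recovered = extract_intrinsic_parameters(K)
    for name, value in params.items():
        assert np.isclose(recovered[name], value), (name, recovered[name], value)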
def solve_camera(points_in_pixel, points_in_world):
# This is a better reference: https://web.stanford.edu/class/cs231a/course_notes/01-camera-models.pdf
#
# The pixel space is:
# - Origin: lower left.
# - x: left to right.
# - y: bottom to top.
# Let p and P be points_in_pixel (2D) and points_in_world (3D), respectively.
# Let R and t be the orientation and location of the world frame in the camera frame.
# T = [R, t]
# [0, 1]
# K = [alpha, -alpha * cot theta, cx, 0]
# [0, beta / sin theta, cy, 0]
# [0, 0, 1, 0]
# Pixels: alpha * (x - cot theta * y) / z + cx
# beta / sin theta * y / z + cy
# which makes sense if the image is skewed to its right.
# [p, 1] = Homogenous(KT[P, 1]).
# Let M = KT \in R^{3 x 4} = [m1, m2, m3]
# p.x = <m1, [P, 1]> / <m3, [P, 1]>.
# p.y = <m2, [P, 1]> / <m3, [P, 1]>.
# p.x * <m3, [P, 1]> - <m1, [P, 1]> = 0.
# p.y * <m3, [P, 1]> - <m2, [P, 1]> = 0.
# Let's flatten them into a linear system.
points_in_pixel = ndarray(points_in_pixel).copy()
points_in_world = ndarray(points_in_world).copy()
num_points = points_in_pixel.shape[0]
assert (num_points, 2) == points_in_pixel.shape
assert (num_points, 3) == points_in_world.shape
P = ndarray(np.zeros((2 * num_points, 12)))
for i in range(num_points):
# Assemble the x equation.
# m1:
P[2 * i, :3] = -points_in_world[i]
P[2 * i, 3] = -1
# m3:
P[2 * i, 8:11] = points_in_world[i] * points_in_pixel[i, 0]
P[2 * i, 11] = points_in_pixel[i, 0]
# Assemble the y equation.
# m2:
P[2 * i + 1, 4:7] = -points_in_world[i]
P[2 * i + 1, 7] = -1
# m3:
P[2 * i + 1, 8:11] = points_in_world[i] * points_in_pixel[i, 1]
P[2 * i + 1, 11] = points_in_pixel[i, 1]
# Now m can be obtained from P * m = 0.
# We solve this by minimizing \|P * m\|^2 s.t. \|m\|^2 = 1.
# Consider SVD of P: P = U * Sigma * V.T.
U, Sigma, Vt = np.linalg.svd(P)
# U @ np.diag(Sigma) @ Vt = P.
# So, Vt * m = [0, 0, 0, ..., 1], or m = V * [0, 0, 0, ..., 1].
m = Vt[-1]
M = ndarray(np.reshape(m, (3, 4)))
# Now we know M = 1 / rho * KT. Let's extract camera parameters.
a1 = M[0, :3]
a2 = M[1, :3]
a3 = M[2, :3]
rho = 1 / np.linalg.norm(a3)
cx = rho * rho * (a1.dot(a3))
cy = rho * rho * (a2.dot(a3))
a1_cross_a3 = np.cross(a1, a3)
a2_cross_a3 = np.cross(a2, a3)
cos_theta = -a1_cross_a3.dot(a2_cross_a3) / (np.linalg.norm(a1_cross_a3) * np.linalg.norm(a2_cross_a3))
theta = np.arccos(cos_theta)
alpha = rho * rho * np.linalg.norm(a1_cross_a3) * np.sin(theta)
beta = rho * rho * np.linalg.norm(a2_cross_a3) * np.sin(theta)
K = ndarray([[alpha, -alpha / np.tan(theta), cx],
[0, beta / np.sin(theta), cy],
[0, 0, 1]])
# Extrinsic camera info:
r1 = a2_cross_a3 / np.linalg.norm(a2_cross_a3)
# r3 has two possibilities. We need to figure out which one is better.
r3_pos = rho * a3
r2_pos = np.cross(r3_pos, r1)
R_pos = np.vstack([r1, r2_pos, r3_pos])
r3_neg = -rho * a3
r2_neg = np.cross(r3_neg, r1)
R_neg = np.vstack([r1, r2_neg, r3_neg])
# Compare K @ R and rho M[:, :3].
if np.linalg.norm(K @ R_pos - rho * M[:, :3]) < np.linalg.norm(K @ R_neg + rho * M[:, :3]):
R = R_pos
else:
R = R_neg
rho = -rho
T = rho * np.linalg.inv(K) @ M[:, 3]
info = {
'K': ndarray(K).copy(),
'R': ndarray(R).copy(),
'T': ndarray(T).copy(),
'alpha': alpha,
'beta': beta,
'theta': theta,
'cx': cx,
'cy': cy,
}
return info
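# Illustrative sanity check, not part of the original script: project synthetic world
# points through a known M = K [R | t] and verify that solve_camera recovers the
# intrinsics. All numbers are arbitrary; with exact correspondences the recovery is
# essentially exact.
def _check_solve_camera():
    alpha, beta, theta, cx, cy = 800.0, 780.0, np.pi / 2, 320.0, 240.0
    K = assemble_intrinsic_parameters(alpha, beta, theta, cx, cy)
    R = np.eye(3)
    t = np.array([0.1, -0.2, 2.0])
    M = K @ np.hstack([R, t.reshape(3, 1)])
    points_world = np.random.uniform(-0.5, 0.5, (12, 3))
    homogeneous = np.hstack([points_world, np.ones((12, 1))])
    projected = (M @ homogeneous.T).T
    points_pixel = projected[:, :2] / projected[:, 2:]
    info = solve_camera(points_pixel, points_world)
    assert np.isclose(info['alpha'], alpha, rtol=1e-3)
    assert np.isclose(info['beta'], beta, rtol=1e-3)
    assert np.isclose(info['cx'], cx, rtol=1e-3)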
def solve_simple_camera(points_in_pixel, points_in_world):
# The pixel space is:
# - Origin: lower left.
# - x: left to right.
# - y: bottom to top.
# Let p and P be points_in_pixel (2D) and points_in_world (3D), respectively.
# Let R and t be the orientation and location of the world frame in the camera frame.
# T = [R, t]
# [0, 1]
# K = [alpha, 0, img_width / 2, 0]
# [0, alpha, img_height / 2, 0]
# [0, 0, 1, 0]
# Pixels: alpha * x / z + cx
# alpha * y / z + cy
# [p, 1] = Homogenous(KT[P, 1]).
# Let M = KT \in R^{3 x 4} = [m1, m2, m3]
# p.x = <m1, [P, 1]> / <m3, [P, 1]>.
# p.y = <m2, [P, 1]> / <m3, [P, 1]>.
# p.x * <m3, [P, 1]> - <m1, [P, 1]> = 0.
# p.y * <m3, [P, 1]> - <m2, [P, 1]> = 0.
# Let's flatten them into a linear system.
points_in_pixel = ndarray(points_in_pixel).copy()
points_in_pixel[:, 0] -= img_width / 2
points_in_pixel[:, 1] -= img_height / 2
points_in_world = ndarray(points_in_world).copy()
num_points = points_in_pixel.shape[0]
assert (num_points, 2) == points_in_pixel.shape
assert (num_points, 3) == points_in_world.shape
P = ndarray(np.zeros((2 * num_points, 12)))
for i in range(num_points):
# Assemble the x equation.
# m1:
P[2 * i, :3] = -points_in_world[i]
P[2 * i, 3] = -1
# m3:
P[2 * i, 8:11] = points_in_world[i] * points_in_pixel[i, 0]
P[2 * i, 11] = points_in_pixel[i, 0]
# Assemble the y equation.
# m2:
P[2 * i + 1, 4:7] = -points_in_world[i]
P[2 * i + 1, 7] = -1
# m3:
P[2 * i + 1, 8:11] = points_in_world[i] * points_in_pixel[i, 1]
P[2 * i + 1, 11] = points_in_pixel[i, 1]
# Now m can be obtained from P * m = 0.
# We solve this by minimizing \|P * m\|^2 s.t. \|m\|^2 = 1.
# Consider SVD of P: P = U * Sigma * V.T.
U, Sigma, Vt = np.linalg.svd(P)
# U @ np.diag(Sigma) @ Vt = P.
# So, Vt * m = [0, 0, 0, ..., 1], or m = V * [0, 0, 0, ..., 1].
m = Vt[-1]
M = ndarray(np.reshape(m, (3, 4)))
# Now we know M = 1 / rho * KT. Let's extract camera parameters.
# M = 1 / rho * [alpha, 0, 0] * [R, t]
# [0, alpha, 0]
# [0, 0, 1]
a1 = M[0, :3]
a2 = M[1, :3]
a3 = M[2, :3]
# |rho| * |a3| = 1.
rho_pos = 1 / np.linalg.norm(a3)
rho_neg = -rho_pos
info = None
error = np.inf
for rho in (rho_pos, rho_neg):
KR = rho * M[:, :3]
alpha0 = np.linalg.norm(KR[0])
alpha1 = np.linalg.norm(KR[1])
assert np.isclose(alpha0, alpha1, rtol=0.1)
alpha = (alpha0 + alpha1) / 2
R_est = np.copy(KR)
R_est[0] /= alpha
R_est[1] /= alpha
U, Sig, Vt = np.linalg.svd(R_est)
assert np.allclose(U @ np.diag(Sig) @ Vt, R_est)
assert np.allclose(Sig, [1, 1, 1], rtol=0.5)
R = U @ Vt
K = np.diag([alpha, alpha, 1])
t = np.linalg.inv(K) @ M[:, 3] * rho
e = np.linalg.norm(np.hstack([K @ R, (K @ t)[:, None]]) / rho - M)
if e < error:
info = {
'K': ndarray([[alpha, 0, img_width / 2],
[0, alpha, img_height / 2],
[0, 0, 1]]),
'R': ndarray(R).copy(),
'T': ndarray(t).copy(),
'alpha': alpha,
'beta': alpha,
'theta': np.pi / 2,
'cx': img_width / 2,
'cy': img_height / 2
}
error = e
return info
# Input:
# - image_data: H x W x 3 ndarray.
# Output:
# - M x 2 pixel coordinates and M x 3 3D coordinates in the world space.
# The world frame is defined as follows:
# - origin: lower left corner of the table.
# - x: left to right.
# - y: bottom to top.
# - z: pointing up from the table surface.
points_in_pixel = []
points_in_world_space = []
last_img_x = None
last_img_y = None
def select_corners(image_data):
global points_in_pixel
global points_in_world_space
global last_img_x
global last_img_y
points_in_pixel = []
points_in_world_space = []
last_img_x = -1
last_img_y = -1
fig = plt.figure()
ax_img = fig.add_subplot(211)
ax_img.imshow(image_data)
# The flat sheet.
ax_table = fig.add_subplot(212)
ax_table.set_xlabel('x')
ax_table.set_ylabel('y')
# We know the 3D coordinates of the table and the billiard box.
table_corners = ndarray([
[0, 0, 0],
[1.10, 0, 0],
[1.10, 0.67, 0],
[0, 0.67, 0]
])
billiard_box_top_corners = ndarray([
[0, 0.67 - 0.056, 0.245],
[0.245, 0.67 - 0.056, 0.245],
[0.245, 0.67, 0.245],
[0, 0.67, 0.245]
])
billiard_box_bottom_corners = ndarray([
[0, 0.67 - 0.056, 0],
[0.245, 0.67 - 0.056, 0],
[0.245, 0.67, 0],
[0, 0.67, 0]
])
billiard_box_top_corners_proxy = np.copy(billiard_box_top_corners)  # api: numpy.copy
#! /usr/bin/env python
# encoding: utf-8
import click
import numpy as np
from .download_input import get_input
def hexgrid_1(movements):
direction_indices = {"n": 0, "ne": 1, "nw": 2,
"s": 3, "se": 4, "sw": 5}
movements = [direction_indices[move] for move in movements.split(",")]
directions = np.array([(1, 0), (0, 1), (1, -1),
(-1, 0), (-1, 1), (0, -1)])
sum_of_directions = np.sum(directions[movements], axis=0)
steps = np.abs(np.sum(sum_of_directions))
signs = np.sign(sum_of_directions)
if signs[0] != signs[1]:
steps += np.abs(sum_of_directions).min()
return steps
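# Illustrative check, not part of the original solution. A few sample paths and the
# hex distances they should give (verified against the hexgrid_1 logic above; they
# match the usual worked examples for this puzzle):
def _check_hexgrid_1():
    assert hexgrid_1("ne,ne,ne") == 3
    assert hexgrid_1("ne,ne,sw,sw") == 0
    assert hexgrid_1("ne,ne,s,s") == 2
    assert hexgrid_1("se,sw,se,sw,sw") == 3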
def hexgrid_2(movements):
direction_indices = {"n": 0, "ne": 1, "nw": 2,
"s": 3, "se": 4, "sw": 5}
movements = [direction_indices[move] for move in movements.split(",")]
directions = np.array([(1, 0), (0, 1), (1, -1),
(-1, 0), (-1, 1), (0, -1)])
sum_of_directions = np.cumsum(directions[movements], axis=0)
steps = np.abs(np.sum(sum_of_directions, axis=1))
signs = np.sign(sum_of_directions)  # api: numpy.sign
import numpy as np
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1
]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1
]
def chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1, len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def i0(x):
x = np.abs(x)
return np.exp(x) * np.piecewise(x, [x<=8.0], [lambda x1: chbevl(x1/2.0-2, _i0A), lambda x1: chbevl(32.0/x1 - 2.0, _i0B) / np.sqrt(x1)])
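# Illustrative check, not part of the original module. The Chebyshev series above
# appears to mirror the Cephes coefficients behind NumPy's own np.i0, so the
# hand-rolled i0 can be cross-checked against it (assuming a NumPy version that
# provides np.i0):
def _check_i0():
    x = np.linspace(0.1, 20.0, 7)
    assert np.allclose(i0(x), np.i0(x))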
def kaiser_window(N, beta):
n = np.arange(0, N)
alpha = (N - 1) / 2.0
return i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) / i0(beta)
def kaiser_beta(a):
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_parameters(ripple, width):
'''
ripple - Both passband and stopband ripple strength in dB.
width - Difference between fs (stopband frequency) and fp (passband frequency). Normalized so that 1 corresponds to pi radians / sample. That is, the frequency is expressed as a fraction of the Nyquist frequency.
'''
a = abs(ripple)
beta = kaiser_beta(a)
numtaps = (a - 7.95) / 2.285 / (np.pi * width) + 1
return int(np.ceil(numtaps)), beta
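# Illustrative usage sketch, not part of the original module. Typical use of the
# helpers above: derive the window length and shape parameter from a ripple/width
# specification, then build the window. The numbers restate the defaults of
# lowpass_kaiser_fir_filter below (65 dB attenuation, 400 Hz transition width at a
# 16 kHz sampling rate).
def _kaiser_design_example():
    nyq = 16000 / 2
    numtaps, beta = kaiser_parameters(ripple=65, width=400 / nyq)
    window = kaiser_window(numtaps, beta)
    return numtaps, beta, window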
def lowpass_kaiser_fir_filter(rate=16000, cutoff_freq=4000, width=400, attenuation=65):
'''
rate - Signal sampling rate.
cutoff_freq - Filter cutoff frequency in Hz.
width - Difference between fs (stopband frequency) and fp (passband frequency) in Hz.
attenuation - Signal attenuation in the stopband, given in dB.
Returns: h(n) - impulse response of lowpass sinc filter with applied Kaiser window.
'''
nyq = rate / 2
cutoff_freq = cutoff_freq / nyq
numtaps, beta = kaiser_parameters(attenuation, float(width) / nyq)
if numtaps % 2 == 0:
numtaps += 1
pass_zero = True # because this is a lowpass filter (DC lies in the passband)
pass_nyq = False # because this is a lowpass filter (the Nyquist frequency lies in the stopband)
cutoff = np.hstack(([0.0]*pass_zero, cutoff_freq, [1.0]*pass_nyq))  # api: numpy.hstack
def CoulogCC(mbeam,Zbeam, mi, Zi, ni, xi,b):
import numpy as np
import const as c
sqrtpie2 = np.sqrt(np.pi/c.e2)
hbc2 = 6.1992097e-05 # hbar c /2 in units of eV cm
u = (1-1/np.sqrt(xi)  # api: numpy.sqrt; the rest of this line is truncated in the source
#!/usr/bin/env python
#
# test_fsl_ents.py -
#
# Author: <NAME> <<EMAIL>>
#
import sys
import numpy as np
import pytest
import fsl.utils.tempdir as tempdir
import fsl.scripts.fsl_ents as extn
def test_genComponentIndexList():
with tempdir.tempdir():
# sequence of 1-indexed integers/file paths
icomps = [1, 5, 28, 12, 42, 54]
fcomps1 = [1, 4, 6, 3, 7]
fcomps2 = [12, 42, 31, 1, 4, 8]
with open('comps1.txt', 'wt') as f:
f.write(','.join([str(l) for l in fcomps1]))
with open('comps2.txt', 'wt') as f:
f.write(','.join([str(l) for l in fcomps2]))
ncomps = 60
comps = icomps + ['comps1.txt', 'comps2.txt']
expcomps = list(sorted(set(icomps + fcomps1 + fcomps2)))
expcomps = [c - 1 for c in expcomps]
assert extn.genComponentIndexList(comps, ncomps) == expcomps
with pytest.raises(ValueError):
extn.genComponentIndexList(comps + [-1], 60)
with pytest.raises(ValueError):
extn.genComponentIndexList(comps, 40)
def test_loadConfoundFiles():
with tempdir.tempdir():
npts = 50
confs = [
np.random.randint(1, 100, (50, 10)),
np.random.randint(1, 100, (50, 1)),
np.random.randint(1, 100, (50, 5))]
badconfs = [
np.random.randint(1, 100, (40, 10)),
np.random.randint(1, 100, (60, 10))]
expected = np.empty((50, 16), dtype=np.float64)
expected[:, :] = np.nan
expected[:, :10] = confs[0]
expected[:, 10:11] = confs[1]
expected[:, 11:16] = confs[2]
conffiles = []
for i, c in enumerate(confs):
fname = 'conf{}.txt'.format(i)
conffiles.append(fname)
np.savetxt(fname, c)
result = extn.loadConfoundFiles(conffiles, npts)
amask = ~np.isnan(expected)
assert np.all(~np.isnan(result) == amask)
assert np.all(result[amask] == expected[amask])
badconfs = [
np.random.randint(1, 100, (40, 10))  # api: numpy.random.randint; the rest of this list is truncated in the source
"""
Core functionality for feature computation
<NAME>
Copyright (c) 2021. Pfizer Inc. All rights reserved.
"""
from abc import ABC, abstractmethod
from collections.abc import Iterator, Sequence
import json
from warnings import warn
from pandas import DataFrame
from numpy import float_, asarray, zeros, sum, moveaxis
__all__ = ["Bank"]
class ArrayConversionError(Exception):
pass
def get_n_feats(size, index):
if isinstance(index, int):
return 1
elif isinstance(index, (Iterator, Sequence)):
return len(index)
elif isinstance(index, slice):
return len(range(*index.indices(size)))
elif isinstance(index, type(Ellipsis)):
return size
def partial_index_check(index):
if index is None:
index = ...
if not isinstance(index, (int, Iterator, Sequence, type(...), slice)):
raise IndexError(f"Index type ({type(index)}) not understood.")
if isinstance(index, str):
raise IndexError("Index type (str) not understood.")
return index
def normalize_indices(nfeat, index):
if index is None:
return [...] * nfeat
elif not isinstance(index, (Iterator, Sequence)): # slice, single integer, etc
return [partial_index_check(index)] * nfeat
elif all([isinstance(i, int) for i in index]): # iterable of ints
return [index] * nfeat
elif isinstance(index, Sequence): # able to be indexed
return [partial_index_check(i) for i in index]
else: # pragma: no cover
raise IndexError(f"Index type ({type(index)}) not understood.")
def normalize_axes(ndim, axis, ind_axis):
"""
Normalize input axes to be positive/correct for how the swapping has to work
"""
if axis == ind_axis:
raise ValueError("axis and index_axis cannot be the same")
if ndim == 1:
return 0, None
elif ndim >= 2:
"""
| shape | ax | ia | move1 | ax | ia | res | ax | ia | res move |
|--------|----|----|--------|----|----|-------|----|----|----------|
| (a, b) | 0 | 1 | (b, a) | 0 | 0 | (bf,) | | | |
| (a, b) | 0 | N | (b, a) | 0 | N | (f, b)| | | |
| (a, b) | 1 | 0 | | | | (3a,) | | | |
| (a, b) | 1 | N | | | | (f, a)| | | |
| shape | ax| ia | move1 | ax| ia| move2 | res | | ia| res move |
|----------|---|------|----------|---|---|----------|----------|----|---|----------|
| (a, b, c)| 0 | 1(0) | (b, c, a)| | | | (bf, c) | 0 | 0 | |
| (a, b, c)| 0 | 2(1) | (b, c, a)| | 1 | (c, b, a)| (cf, b) | 0 | 1 | (b, cf) |
| (a, b, c)| 0 | N | (b, c, a)| | | | (f, b, c)| | | |
| (a, b, c)| 1 | 0 | (a, c, b)| | | | (af, c) | 0 | 0 | |
| (a, b, c)| 1 | 2(1) | (a, c, b)| | 1 | (c, a, b)| (cf, a) | 0 | 1 | (a, cf) |
| (a, b, c)| 1 | N | (a, c, b)| | | | (f, a, c)| | | |
| (a, b, c)| 2 | 0 | (a, b, c)| | | | (af, b) | 0 | 0 | |
| (a, b, c)| 2 | 1 | (a, b, c)| | 1 | (b, a, c)| (bf, a) | 0 | 1 | (a, bf) |
| (a, b, c)| 2 | N | (a, b, c)| | | | (f, a, b)| | | |
| shape | ax| ia | move1 | ia| move2 | res | | ia| res move |
|------------|---|------|-------------|---|-------------|-------------|---|---|-----------|
|(a, b, c, d)| 0 | 1(0) | (b, c, d, a)| | | (bf, c, d) | 0 | 0 | |
|(a, b, c, d)| 0 | 2(1) | (b, c, d, a)| 1 | (c, b, d, a)| (cf, b, d) | 0 | 1 | (b, cf, d)|
|(a, b, c, d)| 0 | 3(2) | (b, c, d, a)| 2 | (d, b, c, a)| (df, b, c) | 0 | 2 | (d, c, df)|
|(a, b, c, d)| 0 | N | (b, c, d, a)| | | (f, b, c, d)| | | |
|(a, b, c, d)| 1 | 0 | (a, c, d, b)| | | (af, c, d) | | | |
|(a, b, c, d)| 1 | 2(1) | (a, c, d, b)| 1 | (c, a, d, b)| (cf, a, d) | 0 | 1 | (a, cf, d)|
|(a, b, c, d)| 1 | 3(2) | (a, c, d, b)| 2 | (d, a, c, b)| (df, a, c) | 0 | 2 | (a, c, df)|
|(a, b, c, d)| 1 | N | (a, c, d, b)| | | (f, a, c, d)| | | |
|(a, b, c, d)| 2 | 0 | (a, b, d, c)| | | (af, b, d) | | | |
|(a, b, c, d)| 2 | 1 | (a, b, d, c)| 1 | (b, a, d, c)| (bf, a, d) | 0 | 1 | (a, bf, d)|
|(a, b, c, d)| 2 | 3(2) | (a, b, d, c)| 2 | (d, a, b, c)| (df, a, b) | 0 | 2 | (a, b, df)|
|(a, b, c, d)| 2 | N | (a, b, d, c)| | | (f, a, b, d)| | | |
|(a, b, c, d)| 3 | 0 | (a, b, c, d)| | | (af, b, c) | | | |
|(a, b, c, d)| 3 | 1 | (a, b, c, d)| 1 | (b, a, c, d)| (bf, a, c) | 0 | 1 | (a, bf, c)|
|(a, b, c, d)| 3 | 2 | (a, b, c, d)| 2 | (c, a, b, d)| (cf, a, b) | 0 | 2 | (a, b, cf)|
|(a, b, c, d)| 3 | N | (a, b, c, d)| | | (f, a, b, c)| | | |
"""
ax = axis if axis >= 0 else ndim + axis
if ind_axis is None:
return ax, None
ia = ind_axis if ind_axis >= 0 else ndim + ind_axis
if ia > ax:
ia -= 1
return ax, ia
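# Illustrative examples, not part of the original module: a few rows of the tables
# above, spelled out as concrete calls.
def _normalize_axes_examples():
    assert normalize_axes(1, 0, None) == (0, None)   # 1-D input: never an index axis
    assert normalize_axes(3, 1, None) == (1, None)   # (a, b, c), axis=1, no index axis
    assert normalize_axes(3, 0, 2) == (0, 1)         # (a, b, c), axis=0, index_axis=2 -> 2(1)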
class Bank:
"""
A feature bank object for ease in creating a table or pipeline of features to be computed.
Parameters
----------
bank_file : {None, path-like}, optional
Path to a saved bank file to load. Optional
Examples
--------
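>>> # Illustrative only; the feature class names below are placeholders for any
>>> # skdh signal Features.
>>> bank = Bank()
>>> bank.add([Mean(), Range()])                      # doctest: +SKIP
>>> feats = bank.compute(signal, fs=50.0, axis=-1)   # doctest: +SKIP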
"""
__slots__ = ("_feats", "_indices")
def __str__(self):
return "Bank"
def __repr__(self):
s = "Bank["
for f in self._feats:
s += f"\n\t{f!r},"
s += "\n]"
return s
def __contains__(self, item):
return item in self._feats
def __len__(self):
return len(self._feats)
def __init__(self, bank_file=None):
# initialize some variables
self._feats = []
self._indices = []
if bank_file is not None:
self.load(bank_file)
def add(self, features, index=None):
"""
Add a feature or features to the pipeline.
Parameters
----------
features : {Feature, list}
Single signal Feature, or list of signal Features to add to the feature Bank
index : {int, slice, list}, optional
Index to be applied to the data input for each feature. Either a single index that
will apply to every feature, or a list of indices with a 1:1 correspondence to the
features being added.
"""
if isinstance(features, Feature):
if features in self:
warn(
f"Feature {features!s} already in the Bank, will be duplicated.",
UserWarning,
)
self._indices.append(partial_index_check(index))
self._feats.append(features)
elif all([isinstance(i, Feature) for i in features]):
if any([ft in self for ft in features]):
warn("Feature already in the Bank, will be duplicated.", UserWarning)
self._indices.extend(normalize_indices(len(features), index))
self._feats.extend(features)
def save(self, file):
"""
Save the feature Bank to a file for a persistent object that can be loaded later to create
the same Bank as before
Parameters
----------
file : path-like
File to be saved to. Creates a new file or overwrites an existing file.
"""
out = []
for i, ft in enumerate(self._feats):
idx = "Ellipsis" if self._indices[i] is Ellipsis else self._indices[i]
out.append(
{ft.__class__.__name__: {"Parameters": ft._params, "Index": idx}}
)
with open(file, "w") as f:
json.dump(out, f)
def load(self, file):
"""
Load a previously saved feature Bank from a json file.
Parameters
----------
file : path-like
File to be read to create the feature Bank.
"""
# the import must be here, otherwise a circular import error occurs
from skdh.features import lib
with open(file, "r") as f:
feats = json.load(f)
for ft in feats:
name = list(ft.keys())[0]
params = ft[name]["Parameters"]
index = ft[name]["Index"]
if index == "Ellipsis":
index = Ellipsis
# add it to the feature bank
self.add(getattr(lib, name)(**params), index=index)
def compute(
self, signal, fs=1.0, *, axis=-1, index_axis=None, indices=None, columns=None
):
"""
Compute the specified features for the given signal
Parameters
----------
signal : {array-like}
Array-like signal to have features computed for.
fs : float, optional
Sampling frequency in Hz. Default is 1Hz
axis : int, optional
Axis along which to compute the features. Default is -1.
index_axis : {None, int}, optional
Axis corresponding to the indices specified in `Bank.add` or `indices`. Default is
None, which assumes that this axis is not part of the signal. Note that setting this to
None means values for `indices` or the indices set in `Bank.add` will be ignored.
indices : {None, int, list-like, slice, ellipsis}, optional
Indices to apply to the input signal. Either None, an integer, a list-like, or a slice to apply
to each feature, or a list-like of lists/objects with a 1:1 correspondence to the
features present in the Bank. If provided, takes precedence over any values given in
`Bank.add`. Default is None, which will use indices from `Bank.add`.
columns : {None, list}, optional
Columns to use if providing a dataframe. Default is None (uses all columns).
Returns
-------
feats : numpy.ndarray
Computed features.
"""
# standardize the input signal
if isinstance(signal, DataFrame):
columns = columns if columns is not None else signal.columns
x = signal[columns].values.astype(float_)
else:
try:
x = asarray(signal, dtype=float_)
except ValueError as e:
raise ArrayConversionError("Error converting signal to ndarray") from e
axis, index_axis = normalize_axes(x.ndim, axis, index_axis)
if index_axis is None:
indices = [...] * len(self)
else:
if indices is None:
indices = self._indices
else:
indices = normalize_indices(len(self), indices)
# get the number of features that will results. Needed to allocate the feature array
if index_axis is None:
# don't have to move any other axes than the computation axis
x = moveaxis(x, axis, -1)
# number of feats is 1 per
n_feats = [1] * len(self)
feats = zeros((sum(n_feats),) + x.shape[:-1], dtype=float_)
else:
# move both the computation and index axis. do this in two steps to allow for undoing
# just the index axis swap later. The index_axis has been adjusted appropriately
# to match this axis move in 2 steps
x = moveaxis(x, axis, -1)
x = moveaxis(x, index_axis, 0)  # api: numpy.moveaxis
# coding: utf-8
""" demo using GREIT """
# Copyright (c) <NAME>. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division, absolute_import, print_function
import numpy as np
import matplotlib.pyplot as plt
import pyeit.mesh as mesh
from pyeit.eit.fem import EITForward
import pyeit.eit.protocol as protocol
from pyeit.mesh.shape import thorax
import pyeit.eit.greit as greit
from pyeit.mesh.wrapper import PyEITAnomaly_Circle
""" 0. construct mesh """
n_el = 16 # nb of electrodes
use_customize_shape = False
if use_customize_shape:
# Mesh shape is specified with fd parameter in the instantiation, e.g : fd=thorax
mesh_obj = mesh.create(n_el, h0=0.1, fd=thorax)
else:
mesh_obj = mesh.create(n_el, h0=0.1)
# extract node, element, alpha
pts = mesh_obj.node
tri = mesh_obj.element
""" 1. problem setup """
# this step is not needed, actually
# mesh_0 = mesh.set_perm(mesh_obj, background=1.0)
# test function for altering the 'permittivity' in mesh
anomaly = [
PyEITAnomaly_Circle(center=[0.4, 0], r=0.1, perm=10.0),
PyEITAnomaly_Circle(center=[-0.4, 0], r=0.1, perm=10.0),
PyEITAnomaly_Circle(center=[0, 0.5], r=0.1, perm=0.1),
PyEITAnomaly_Circle(center=[0, -0.5], r=0.1, perm=0.1),
]
mesh_new = mesh.set_perm(mesh_obj, anomaly=anomaly, background=1.0)
delta_perm = np.real(mesh_new.perm - mesh_obj.perm)  # api: numpy.real
from evalutils.exceptions import ValidationError
from evalutils.io import CSVLoader, FileLoader, ImageLoader
import json
import nibabel as nib
import numpy as np
import os.path
from pathlib import Path
from pandas import DataFrame, MultiIndex
import scipy.ndimage
from scipy.ndimage.interpolation import map_coordinates, zoom
from surface_distance import *
##### paths #####
DEFAULT_INPUT_PATH = Path("/input/")
DEFAULT_GROUND_TRUTH_PATH = Path("/opt/evaluation/ground-truth/")
DEFAULT_EVALUATION_OUTPUT_FILE_PATH = Path("/output/metrics.json")
##### metrics #####
def jacobian_determinant(disp):
_, _, H, W, D = disp.shape
gradx = np.array([-0.5, 0, 0.5]).reshape(1, 3, 1, 1)
grady = np.array([-0.5, 0, 0.5]).reshape(1, 1, 3, 1)
gradz = np.array([-0.5, 0, 0.5])  # api: numpy.array; the rest of this line (cf. the gradx/grady reshape pattern) is truncated in the source
import argparse
import os
import sys
import numpy as np
import pandas as pd
from itertools import compress
import copy
import deTiN_utilities as du
import deTiN_SSNV_based_estimate as dssnv
import deTiN_aSCNA_based_estimate as dascna
import numpy.ma as ma
class input:
"""class which holds the required detin somatic data prior to model"""
def __init__(self, args, ascna_probe_number_filter=200, ascna_SNP_number_filter=20, coverage_threshold=15,
SSNV_af_threshold=0.15, aSCNA_variance_threshold=0.025):
# related to inputs from command line
self.call_stats_file = args.mutation_data_path
self.seg_file = args.cn_data_path
self.tumor_het_file = args.tumor_het_data_path
self.normal_het_file = args.normal_het_data_path
self.exac_db_file = args.exac_data_path
self.indel_file = args.indel_data_path
self.indel_type = args.indel_data_type
self.only_ascnas = args.only_ascnas
if type(args.weighted_classification):
self.weighted_classification = bool(args.weighted_classification)
else:
self.weighted_classification = args.weighted_classification
if type(args.mutation_prior) == str:
self.mutation_prior = float(args.mutation_prior)
else:
self.mutation_prior = args.mutation_prior
if type(args.TiN_prior) == str:
self.TiN_prior = float(args.TiN_prior)
else:
self.TiN_prior = args.TiN_prior
if type(args.resolution) == str:
self.resolution = int(args.resolution)
else:
self.resolution = args.resolution
self.output_path = args.output_dir
self.output_name = args.output_name
if type(args.use_outlier_removal) == str:
if args.use_outlier_removal.lower() == 'false':
self.use_outlier_removal = False
else:
self.use_outlier_removal = True
else:
self.use_outlier_removal = args.use_outlier_removal
if type(args.aSCNA_threshold) == str:
self.aSCNA_thresh = float(args.aSCNA_threshold)
else:
self.aSCNA_thresh = args.aSCNA_threshold
try:
self.ascna_probe_number_filter = float(args.ascna_probe_number_filter)
except AttributeError:
self.ascna_probe_number_filter = ascna_probe_number_filter
try:
self.ascna_SNP_number_filter = float(args.ascna_SNP_number_filter)
except AttributeError:
self.ascna_SNP_number_filter = ascna_SNP_number_filter
try:
self.coverage_threshold = float(args.coverage_threshold)
except AttributeError:
self.coverage_threshold = coverage_threshold
try:
self.SSNV_af_threshold = float(args.SSNV_af_threshold)
except AttributeError:
self.SSNV_af_threshold = SSNV_af_threshold
try:
self.aSCNA_variance_threshold = float(args.aSCNA_variance_threshold)
except AttributeError:
self.aSCNA_variance_threshold = aSCNA_variance_threshold
try:
self.CancerHotSpotsBED = args.cancer_hot_spots
except AttributeError:
self.aSCNA_variance_threshold = 'NA'
# related to inputs from class functions
self.call_stats_table = []
self.seg_table = []
self.het_table = []
self.candidates = []
self.indel_table = []
self.skew = 0.5
def read_call_stats_file(self):
fields = ['contig', 'position', 'ref_allele', 'alt_allele', 'tumor_name', 'normal_name', 't_alt_count',
't_ref_count'
, 'n_alt_count', 'n_ref_count', 'failure_reasons', 'judgement','t_lod_fstar']
fields_type = {'contig': str, 'position': np.int, 'ref_allele': str, 'alt_allele': str, 'tumor_name': str,
'normal_name': str,
't_alt_count': np.int, 't_ref_count': np.int, 'n_alt_count': np.int, 'n_ref_count': np.int,
'failure_reasons': str, 'judgement': str}
try:
self.call_stats_table = pd.read_csv(self.call_stats_file, '\t', index_col=False,
comment='#', usecols=fields, dtype=fields_type)
except (ValueError, LookupError):
try:
fields = ['contig', 'position', 'ref_allele', 'alt_allele', 'tumor_name', 'normal_name', 't_alt_count',
't_ref_count'
, 'n_alt_count', 'n_ref_count', 'failure_reasons', 'judgement']
self.call_stats_table = pd.read_csv(self.call_stats_file, '\t', index_col=False,
comment='#', usecols=fields, dtype=fields_type)
except (ValueError, LookupError):
print('Error reading call stats skipping first two rows and trying again')
self.call_stats_table = pd.read_csv(self.call_stats_file, '\t', index_col=False,
comment='#', skiprows=2, usecols=fields, dtype=fields_type)
if type(self.call_stats_table['contig'][0]) == str:
self.call_stats_table['Chromosome'] = du.chr2num(np.array(self.call_stats_table['contig']))
else:
self.call_stats_table['Chromosome'] = np.array(self.call_stats_table['contig']) - 1
self.call_stats_table = self.call_stats_table[np.isfinite(self.call_stats_table['Chromosome'])]
self.call_stats_table['genomic_coord_x'] = du.hg19_to_linear_positions(
np.array(self.call_stats_table['Chromosome']), np.array(self.call_stats_table['position']))
self.n_calls_in = len(self.call_stats_table)
self.call_stats_table.reset_index(inplace=True, drop=True)
def read_het_file(self):
t_het_header = du.read_file_header(self.tumor_het_file)
n_het_header = du.read_file_header(self.normal_het_file)
cols_t_type = {t_het_header[0]: str}
cols_n_type = {n_het_header[0]: str}
tumor_het_table = pd.read_csv(self.tumor_het_file, '\t', index_col=False, low_memory=False, comment='#',
dtype=cols_t_type)
normal_het_table = pd.read_csv(self.normal_het_file, '\t', index_col=False, low_memory=False, comment='#',
dtype=cols_n_type)
tumor_het_table = du.fix_het_file_header(tumor_het_table)
normal_het_table = du.fix_het_file_header(normal_het_table)
tumor_het_table['Chromosome'] = du.chr2num(np.array(tumor_het_table['CONTIG']))
normal_het_table['Chromosome'] = du.chr2num(np.array(normal_het_table['CONTIG']))
tumor_het_table = tumor_het_table[np.isfinite(tumor_het_table['Chromosome'])]
tumor_het_table['genomic_coord_x'] = du.hg19_to_linear_positions(np.array(tumor_het_table['Chromosome']),
np.array(tumor_het_table['POSITION']))
normal_het_table = normal_het_table[np.isfinite(normal_het_table['Chromosome'])]
normal_het_table['genomic_coord_x'] = du.hg19_to_linear_positions(np.array(normal_het_table['Chromosome']),
np.array(normal_het_table['POSITION']))
tumor_het_table['AF'] = np.true_divide(tumor_het_table['ALT_COUNT'],
tumor_het_table['ALT_COUNT'] + tumor_het_table['REF_COUNT'])
normal_het_table['AF'] = np.true_divide(normal_het_table['ALT_COUNT'],
normal_het_table['ALT_COUNT'] + normal_het_table['REF_COUNT'])
self.het_table = pd.merge(normal_het_table, tumor_het_table, on='genomic_coord_x', suffixes=('_N', '_T'))
def read_seg_file(self):
if self.seg_file == 'NULL':
self.seg_table = pd.DataFrame(index=[0],columns=['Chromosome','Start.bp','End.bp','n_probes','length','f','tau','genomic_coord_start','genomic_coord_end'])
self.het_table = pd.DataFrame(index=[0],columns=['seg_id','tau','f','d','AF_T','AF_N','Chromosome','genomic_coord_x','ALT_COUNT_N'
'ALT_COUNT_T','REF_COUNT_N','REF_COUNT_T'])
else:
seg_header = du.read_file_header(self.seg_file)
cols_seg_type = {seg_header[0]: str}
self.seg_table = pd.read_csv(self.seg_file, '\t', index_col=False, low_memory=False, comment='#',
dtype=cols_seg_type)
self.seg_table = du.fix_seg_file_header(self.seg_table)
self.seg_table['Chromosome'] = du.chr2num(np.array(self.seg_table['Chromosome']))
self.seg_table['genomic_coord_start'] = du.hg19_to_linear_positions(np.array(self.seg_table['Chromosome']),
np.array(self.seg_table['Start.bp']))
self.seg_table['genomic_coord_end'] = du.hg19_to_linear_positions(np.array(self.seg_table['Chromosome']),
np.array(self.seg_table['End.bp']))
def annotate_call_stats_with_allelic_cn_data(self):
f_acs = np.zeros([self.n_calls_in, 1]) + 0.5
tau = np.zeros([self.n_calls_in, 1]) + 2
for i, r in self.seg_table.iterrows():
f_acs[np.logical_and(np.array(self.call_stats_table['genomic_coord_x']) >= r['genomic_coord_start'],
np.array(self.call_stats_table['genomic_coord_x']) <= r['genomic_coord_end'])] = r.f
tau[np.logical_and(np.array(self.call_stats_table['genomic_coord_x']) >= r['genomic_coord_start'],
np.array(self.call_stats_table['genomic_coord_x']) <= r[
'genomic_coord_end'])] = r.tau + 0.001
self.call_stats_table['tau'] = tau
self.call_stats_table['f_acs'] = f_acs
def annotate_het_table(self):
seg_id = np.zeros([len(self.het_table), 1]) - 1
tau = np.zeros([len(self.het_table), 1]) + 2
f = np.zeros([len(self.het_table), 1]) + 0.5
for seg_index, seg in self.seg_table.iterrows():
het_index = np.logical_and(self.het_table['genomic_coord_x'] >= seg['genomic_coord_start'],
self.het_table['genomic_coord_x'] <= seg['genomic_coord_end'])
ix = list(compress(range(len(het_index)), het_index))
seg_id[ix] = seg_index
tau[ix] = seg['tau']
f[ix] = seg['f']
self.het_table['seg_id'] = seg_id
self.het_table['tau'] = tau
self.het_table['f'] = f
d = np.ones([len(self.het_table), 1])
d[np.array(self.het_table['AF_T'] <= 0.5, dtype=bool)] = -1
self.skew = 0.5
self.het_table['d'] = d
def read_and_preprocess_SSNVs(self):
self.read_call_stats_file()
self.read_seg_file()
self.annotate_call_stats_with_allelic_cn_data()
if not self.indel_file == 'None':
if not self.indel_type == 'None':
self.indel_table = du.read_indel_vcf(self.indel_file, self.seg_table, self.indel_type)
else:
print('Warning: if indels are provided you must also specify indel data source using --indel_data_type')
print('no indels will be returned')
self.indel_file = 'None'
self.indel_type = 'None'
def read_and_preprocess_aSCNAs(self):
self.read_seg_file()
self.read_het_file()
self.seg_table = du.filter_segments_based_on_size_f_and_tau(self.seg_table, self.aSCNA_thresh,
self.ascna_probe_number_filter)
self.annotate_het_table()
self.het_table = du.remove_sites_near_centromere_and_telomeres(self.het_table)
def read_and_preprocess_data(self):
self.read_and_preprocess_SSNVs()
self.read_and_preprocess_aSCNAs()
class output:
""" combined from deTiN's models
reclassified SSNVs based on TiN estimate are labeled KEEP in judgement column
self.SSNVs['judgement'] == KEEP
confidence intervals (CI_tin_high/low) represent 95% interval
"""
def __init__(self, input, ssnv_based_model, ascna_based_model):
# previous results
self.input = input
self.ssnv_based_model = ssnv_based_model
self.ascna_based_model = ascna_based_model
# useful outputs
self.SSNVs = input.candidates
self.joint_log_likelihood = np.zeros([self.input.resolution, 1])
self.joint_posterior = np.zeros([self.input.resolution, 1])
self.CI_tin_high = []
self.CI_tin_low = []
self.TiN = []
self.p_null = 1
# variables
self.TiN_range = np.linspace(0, 1, num=self.input.resolution)
self.TiN_int = 0
# threshold for accepting variants based on the predicted somatic assignment
# if p(S|TiN) exceeds threshold we keep the variant.
self.threshold = 0.5
# defines whether to remove events based on predicted exceeding predicted allele fractions
# if Beta_cdf(predicted_normal_af;n_alt_count+1,n_ref_count+1) <= 0.01 we remove the variant
self.use_outlier_threshold = input.use_outlier_removal
if self.input.indel_file != 'None':
if self.input.indel_table.isnull().values.sum() == 0:
self.indels = self.input.indel_table
def calculate_joint_estimate(self):
# do not use SSNV based estimate if it exceeds 0.3 (this estimate can be unreliable at high TiNs due to
# germline events)
if self.ssnv_based_model.TiN <= 0.3 and ~np.isnan(self.ascna_based_model.TiN):
if len(self.ascna_based_model.centroids) > 1:
reselect_cluster = np.argmin(np.abs(self.ascna_based_model.centroids / 100 - self.ssnv_based_model.TiN))
self.ascna_based_model.TiN_likelihood = self.ascna_based_model.cluster_TiN_likelihoods[reselect_cluster]
print('reselected cluster based on SSNVs')
# combine independent likelihoods
self.joint_log_likelihood = self.ascna_based_model.TiN_likelihood + self.ssnv_based_model.TiN_likelihood
# normalize likelihood to calculate posterior
self.joint_posterior = np.exp(self.ascna_based_model.TiN_likelihood + self.ssnv_based_model.TiN_likelihood
- np.nanmax(
self.ascna_based_model.TiN_likelihood + self.ssnv_based_model.TiN_likelihood))
self.joint_posterior = np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))
self.CI_tin_low = self.TiN_range[next(x[0] for x in enumerate(
np.cumsum(np.ma.masked_array(np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))))) if
x[1] > 0.025)]
self.CI_tin_high = self.TiN_range[
next(x[0] for x in enumerate(np.cumsum(
np.ma.masked_array(np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))))) if
x[1] > 0.975)]
self.TiN_int = np.nanargmax(self.joint_posterior)
self.TiN = self.TiN_range[self.TiN_int]
zero_tin_ssnv_model = copy.deepcopy(self.ssnv_based_model)
zero_tin_ssnv_model.TiN = 0
zero_tin_ssnv_model.expectation_of_z_given_TiN()
zero_tin_ssnv_model.maximize_TiN_likelihood()
zero_total_l = zero_tin_ssnv_model.TiN_likelihood + self.ascna_based_model.TiN_likelihood
zero_total_l = np.exp(zero_total_l - np.nanmax(zero_total_l))
self.p_null = np.true_divide(zero_total_l,np.nansum(zero_total_l))[0]
print('joint TiN estimate = ' + str(self.TiN))
# use only ssnv based model
elif ~np.isnan(self.ascna_based_model.TiN):
# otherwise TiN estimate is = to aSCNA estimate
print('SSNV based TiN estimate exceed 0.3 using only aSCNA based estimate')
self.joint_log_likelihood = self.ascna_based_model.TiN_likelihood
self.joint_posterior = np.exp(
self.ascna_based_model.TiN_likelihood - np.nanmax(self.ascna_based_model.TiN_likelihood))
self.joint_posterior = np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))
self.CI_tin_low = self.TiN_range[next(x[0] for x in enumerate(
np.cumsum(np.ma.masked_array(np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior))))) if
x[1] > 0.025)]
self.CI_tin_high = self.TiN_range[
next(x[0] for x in enumerate(np.cumsum(
np.ma.masked_array(np.true_divide(self.joint_posterior, np.nansum(self.joint_posterior)  # api: numpy.nansum; the rest of this statement is truncated in the source
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cirq
# Python 2 gives a different repr due to unicode strings being prefixed with u.
@cirq.testing.only_test_in_python3
def test_wave_function_trial_result_repr():
final_simulator_state = cirq.WaveFunctionSimulatorState(
qubit_map={cirq.NamedQubit('a'): 0}, state_vector=np.array([0, 1]))
trial_result = cirq.WaveFunctionTrialResult(
params=cirq.ParamResolver({'s': 1}),
measurements={'m': np.array([[1]])},
final_simulator_state=final_simulator_state)
assert repr(trial_result) == (
"cirq.WaveFunctionTrialResult("
"params=cirq.ParamResolver({'s': 1}), "
"measurements={'m': array([[1]])}, "
"final_simulator_state=cirq.WaveFunctionSimulatorState("
"state_vector=array([0, 1]), "
"qubit_map={cirq.NamedQubit('a'): 0}))")
def test_wave_function_trial_result_equality():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(
cirq.WaveFunctionTrialResult(
params=cirq.ParamResolver({}),
measurements={},
final_simulator_state=cirq.WaveFunctionSimulatorState(np.array([]),
{})),
cirq.WaveFunctionTrialResult(
params=cirq.ParamResolver({}),
measurements={},
final_simulator_state=cirq.WaveFunctionSimulatorState(np.array([])  # api: numpy.array; the rest of this statement is truncated in the source
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.parsing_ops."""
import itertools
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample
def empty_sparse(dtype, shape=None):
if shape is None:
shape = [0]
return (np.empty(shape=(0, len(shape)), dtype=np.int64),
np.array([], dtype=dtype), np.array(shape, dtype=np.int64))
def flatten(list_of_lists):
"""Flatten one level of nesting."""
return itertools.chain.from_iterable(list_of_lists)
def flatten_values_tensors_or_sparse(tensors_list):
"""Flatten each SparseTensor object into 3 Tensors for session.run()."""
return list(
flatten([[v.indices, v.values, v.dense_shape] if isinstance(
v, sparse_tensor.SparseTensor) else [v] for v in tensors_list]))
def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
flat_output):
tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))
i = 0 # Index into the flattened output of session.run()
for k, v in dict_tensors.items():
expected_v = expected_tensors[k]
tf_logging.info("Comparing key: %s", k)
if isinstance(v, sparse_tensor.SparseTensor):
# Three outputs for SparseTensor : indices, values, shape.
tester.assertEqual([k, len(expected_v)], [k, 3])
tester.assertAllEqual(expected_v[0], flat_output[i])
tester.assertAllEqual(expected_v[1], flat_output[i + 1])
tester.assertAllEqual(expected_v[2], flat_output[i + 2])
i += 3
else:
# One output for standard Tensor.
tester.assertAllEqual(expected_v, flat_output[i])
i += 1
class ParseExampleTest(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
with self.cached_session() as sess:
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
out = parsing_ops.parse_single_example(**kwargs)
sess.run(flatten_values_tensors_or_sparse(out.values()))
return
else:
# Returns dict w/ Tensors and SparseTensors.
out = parsing_ops.parse_single_example(**kwargs)
# Also include a test with the example names specified to retain
# code coverage of the unfused version, and ensure that the two
# versions produce the same results.
out_with_example_name = parsing_ops.parse_single_example(
example_names="name", **kwargs)
for result_dict in [out, out_with_example_name]:
result = flatten_values_tensors_or_sparse(result_dict.values())
# Check values.
tf_result = self.evaluate(result)
_compare_output_to_expected(self, result_dict, expected_values,
tf_result)
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(tuple(out[k].get_shape().as_list()), f.shape)
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
tuple(out[k].indices.get_shape().as_list()), (None, 1))
self.assertEqual(tuple(out[k].values.get_shape().as_list()), (None,))
self.assertEqual(
tuple(out[k].dense_shape.get_shape().as_list()), (1,))
@test_util.run_deprecated_v1
def testEmptySerializedWithAllDefaults(self):
sparse_name = "st_a"
a_name = "a"
b_name = "b"
c_name = "c:has_a_tricky_name"
a_default = [0, 42, 0]
b_default = np.random.rand(3, 3).astype(bytes)
c_default = np.random.rand(2).astype(np.float32)
expected_st_a = ( # indices, values, shape
np.empty((0, 1), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([0], dtype=np.int64)) # max_elems = 0
expected_output = {
sparse_name: expected_st_a,
a_name: np.array([a_default]),
b_name: np.array(b_default),
c_name: np.array(c_default),
}
self._test({
"serialized": ops.convert_to_tensor(""),
"features": {
sparse_name:
parsing_ops.VarLenFeature(dtypes.int64),
a_name:
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
b_name:
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
c_name:
parsing_ops.FixedLenFeature(
(2,), dtypes.float32, default_value=c_default),
}
}, expected_output)
def testEmptySerializedWithoutDefaultsShouldFail(self):
input_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=[0, 42, 0]),
"b":
parsing_ops.FixedLenFeature(
(3, 3),
dtypes.string,
default_value=np.random.rand(3, 3).astype(bytes)),
# Feature "c" is missing a default, this gap will cause failure.
"c":
parsing_ops.FixedLenFeature(
(2,), dtype=dtypes.float32),
}
# Edge case where the key is there but the feature value is empty
original = example(features=features({"c": feature()}))
self._test(
{
"serialized": original.SerializeToString(),
"features": input_features,
},
expected_err=(errors_impl.OpError,
"Feature: c \\(data type: float\\) is required"))
# Standard case of missing key and value.
self._test(
{
"serialized": "",
"features": input_features,
},
expected_err=(errors_impl.OpError,
"Feature: c \\(data type: float\\) is required"))
def testDenseNotMatchingShapeShouldFail(self):
original = example(features=features({
"a": float_feature([-1, -1]),
}))
serialized = original.SerializeToString()
self._test(
{
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)
}
},
# TODO(mrry): Consider matching the `io.parse_example()` error message.
expected_err=(errors_impl.OpError, "Key: a."))
def testDenseDefaultNoShapeShouldFail(self):
original = example(features=features({
"a": float_feature([1, 1, 3]),
}))
serialized = original.SerializeToString()
self._test(
{
"serialized": ops.convert_to_tensor(serialized),
"features": {
"a": parsing_ops.FixedLenFeature(None, dtypes.float32)
}
},
expected_err=(ValueError, "Missing shape for feature a"))
@test_util.run_deprecated_v1
def testSerializedContainingSparse(self):
original = [
example(features=features({
"st_c": float_feature([3, 4])
})),
example(features=features({
"st_c": float_feature([]), # empty float list
})),
example(features=features({
"st_d": feature(), # feature with nothing in it
})),
example(features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
}))
]
expected_outputs = [{
"st_c": (np.array([[0], [1]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([2], dtype=np.int64)),
"st_d":
empty_sparse(bytes)
}, {
"st_c": empty_sparse(np.float32),
"st_d": empty_sparse(bytes)
}, {
"st_c": empty_sparse(np.float32),
"st_d": empty_sparse(bytes)
}, {
"st_c": (np.array([[0], [1], [2]], dtype=np.int64),
np.array([1.0, 2.0, -1.0], dtype=np.float32),
np.array([3], dtype=np.int64)),
"st_d": (np.array([[0]], dtype=np.int64), np.array(["hi"], dtype=bytes),
np.array([1], dtype=np.int64))
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"st_c": parsing_ops.VarLenFeature(dtypes.float32),
"st_d": parsing_ops.VarLenFeature(dtypes.string)
},
}, expected_output)
def testSerializedContainingSparseFeature(self):
original = [
example(features=features({
"val": float_feature([3, 4]),
"idx": int64_feature([5, 10])
})),
example(features=features({
"val": float_feature([]), # empty float list
"idx": int64_feature([])
})),
example(features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
"idx":
int64_feature([0, 9, 3]) # unsorted
}))
]
expected_outputs = [{
"sp": (np.array([[5], [10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([13], dtype=np.int64))
}, {
"sp": empty_sparse(np.float32, shape=[13])
}, {
"sp": empty_sparse(np.float32, shape=[13])
}, {
"sp": (np.array([[0], [3], [9]], dtype=np.int64),
np.array([1.0, -1.0, 2.0], dtype=np.float32),
np.array([13], dtype=np.int64))
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.float32,
[13])
}
}, expected_output)
def testSerializedContainingSparseFeatureReuse(self):
original = [
example(features=features({
"val1": float_feature([3, 4]),
"val2": float_feature([5, 6]),
"idx": int64_feature([5, 10])
})),
example(features=features({
"val1": float_feature([]), # empty float list
"idx": int64_feature([])
})),
]
expected_outputs = [{
"sp1": (np.array([[5], [10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([13], dtype=np.int64)),
"sp2": (np.array([[5], [10]], dtype=np.int64),
np.array([5.0, 6.0], dtype=np.float32),
np.array([7], dtype=np.int64))
}, {
"sp1": empty_sparse(np.float32, shape=[13]),
"sp2": empty_sparse(np.float32, shape=[7])
}]
for proto, expected_output in zip(original, expected_outputs):
self._test({
"serialized": ops.convert_to_tensor(proto.SerializeToString()),
"features": {
"sp1":
parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13),
"sp2":
parsing_ops.SparseFeature(
"idx",
"val2",
dtypes.float32,
size=7,
already_sorted=True)
}
}, expected_output)
def testSerializedContaining3DSparseFeature(self):
original = [
example(features=features({
"val": float_feature([3, 4]),
"idx0": int64_feature([5, 10]),
"idx1": int64_feature([0, 2]),
})),
example(features=features({
"val": float_feature([]), # empty float list
"idx0": int64_feature([]),
"idx1": int64_feature([]),
})),
example(features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
"idx0": int64_feature([0, 9, 3]), # unsorted
"idx1": int64_feature([1, 0, 2]),
}))
]
expected_outputs = [{
"sp": ( | np.array([[5, 0], [10, 2]], dtype=np.int64) | numpy.array |
import numpy as np
import copy
from ..Utils.geometry import *
class LinearLeastSquare:
"""
Linear Least Square Fitting solution.
Parameters
----------
parameter_space : :obj:`ParaMol.Parameter_space.parameter_space.ParameterSpace`
Instance of the parameter space.
    include_regularization : bool
        Flag that signals whether or not to include regularization.
method : str
Type of regularization. Options are 'L2' or 'hyperbolic'.
scaling_factor : float
Scaling factor of the regularization value.
hyperbolic_beta : float
Hyperbolic beta value. Only used if `regularization_type` is `hyperbolic`.
weighting_method : str
        Method used to weight the conformations. Available methods are "uniform", "boltzmann" and "manual".
weighting_temperature : unit.simtk.Quantity
Temperature used in the weighting. Only relevant if `weighting_method` is "boltzmann".
Attributes
----------
    include_regularization : bool
        Flag that signals whether or not to include regularization.
regularization_type : str
Type of regularization. Options are 'L2' or 'hyperbolic'.
scaling_factor : float
Scaling factor of the regularization value.
hyperbolic_beta : float
Hyperbolic beta value. Only used if `regularization_type` is `hyperbolic`.
weighting_method : str
        Method used to weight the conformations. Available methods are "uniform", "boltzmann" and "manual".
weighting_temperature : unit.simtk.Quantity
Temperature used in the weighting. Only relevant if `weighting_method` is "boltzmann".
"""
def __init__(self, parameter_space, include_regularization, method, scaling_factor, hyperbolic_beta, weighting_method, weighting_temperature, **kwargs):
# Matrices used in the explicit solution of the LLS equations
self._parameter_space = parameter_space
self._parameters = None
self._n_parameters = None
# Private variables
self._A = None
self._B = None
self._Aw = None
self._Bw = None
self._param_keys_list = None
self._p0 = None
self._initial_param_regularization = None
# Regularization variables
self._include_regularization = include_regularization
self._regularization_type = method
self._scaling_factor = scaling_factor
self._hyperbolic_beta = hyperbolic_beta
# Weighting variables
self._weighting_method = weighting_method
self._weighting_temperature = weighting_temperature
def fit_parameters_lls(self, systems, alpha_bond=0.05, alpha_angle=0.05):
"""
Method that fits bonded parameters using LLS.
Notes
-----
Only one ParaMol system is supported at once.
Parameters
----------
systems : list of :obj:`ParaMol.System.system.ParaMolSystem`
List containing instances of ParaMol systems.
alpha_bond : float
alpha_angle : float
Returns
-------
systems, parameter_space, objective_function, optimizer
"""
assert self._weighting_method.upper() != "NON_BOLTZMANN", "LLS does not support {} weighting method.".format(self._weighting_method)
# TODO: In the future, adapt this to multiple systems
system = systems[0]
# Compute A matrix
self._calculate_a(system, alpha_bond, alpha_angle)
self._n_parameters = self._A.shape[1]
# Compute B matrix
self._calculate_b(system)
# ---------------------------------------------------------------- #
# Calculate conformations weights #
# ---------------------------------------------------------------- #
system.compute_conformations_weights(temperature=self._weighting_temperature, weighting_method=self._weighting_method, emm=None)
# Weight conformations
for row in range(system.n_structures):
self._A[row, :] = self._A[row, :] * np.sqrt(system.weights[row]) / np.sqrt(np.var(system.ref_energies))
self._B = self._B * np.sqrt(system.weights) / np.sqrt(np.var(system.ref_energies))
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Preconditioning #
# ---------------------------------------------------------------- #
# Preconditioning
self._calculate_scaling_constants()
for row in range(system.n_structures):
self._A[row, :] = self._A[row, :] / self._scaling_constants
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Regularization #
# ---------------------------------------------------------------- #
if self._include_regularization:
# Add regularization
self._A, self._B = self._add_regularization()
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Symmetries #
# ---------------------------------------------------------------- #
self._add_symmetries(system)
# ---------------------------------------------------------------- #
# Perform LLS
self._parameters = np.linalg.lstsq(self._A, self._B, rcond=None)[0]
# Revert scaling
self._parameters = self._parameters / self._scaling_constants
# Reconstruct parameters
self._reconstruct_parameters(self._parameters)
# Get optimizable parameters
self._parameter_space.get_optimizable_parameters([system], symmetry_constrained=False)
return self._parameter_space.optimizable_parameters_values
def fit_parameters_lls2(self, systems, alpha_bond=0.05, alpha_angle=0.05):
"""
Method that fits bonded parameters using LLS.
Notes
-----
Only one ParaMol system is supported at once.
Experimental function.
Parameters
----------
systems : list of :obj:`ParaMol.System.system.ParaMolSystem`
List containing instances of ParaMol systems.
alpha_bond : float
alpha_angle : float
Returns
-------
systems, parameter_space, objective_function, optimizer
"""
# TODO: In the future, adapt this to multiple systems
system = systems[0]
n_iter = 1
rmsd = 999
rmsd_tol = 1e-20
max_iter = 100000
# Self-consistent solution
while n_iter < max_iter and rmsd > rmsd_tol:
# Compute A matrix
self._calculate_a(system, alpha_bond, alpha_angle)
self._n_parameters = self._A.shape[1]
# Compute B matrix
self._calculate_b(system)
# ---------------------------------------------------------------- #
# Calculate conformations weights #
# ---------------------------------------------------------------- #
system.compute_conformations_weights(temperature=self._weighting_temperature, weighting_method=self._weighting_method, emm=system.get_energies_ensemble())
print(system.get_energies_ensemble())
# Weight conformations
for row in range(system.n_structures):
self._A[row, :] = self._A[row, :] * np.sqrt(system.weights[row])
self._B = self._B * np.sqrt(system.weights)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Preconditioning #
# ---------------------------------------------------------------- #
# Preconditioning
self._calculate_scaling_constants()
for row in range(system.n_structures):
self._A[row, :] = self._A[row, :] / self._scaling_constants
# ---------------------------------------------------------------- #
new_param = self._parameter_space.optimizable_parameters_values / self._parameter_space.scaling_constants
# ---------------------------------------------------------------- #
# Regularization #
# ---------------------------------------------------------------- #
if self._include_regularization:
# Add regularization
self._A, self._B = self._add_regularization()
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Symmetries #
# ---------------------------------------------------------------- #
self._add_symmetries(system)
# ---------------------------------------------------------------- #
# Perform LLS
self._parameters = np.linalg.lstsq(self._A, self._B, rcond=None)[0]
# Revert scaling
self._parameters = self._parameters / self._scaling_constants
# Reconstruct parameters
self._reconstruct_parameters(self._parameters)
# Get optimizable parameters
self._parameter_space.get_optimizable_parameters([system], symmetry_constrained=False)
self._parameter_space.update_systems(systems, self._parameter_space.optimizable_parameters_values, symmetry_constrained=False)
old_param = copy.deepcopy(new_param)
new_param = self._parameter_space.optimizable_parameters_values /self._parameter_space.scaling_constants
rmsd = np.sqrt(np.sum((old_param - new_param) ** 2) / len(self._parameter_space.optimizable_parameters_values))
a = np.sum(system.weights * (system.get_energies_ensemble() - system.ref_energies - np.mean(system.get_energies_ensemble() - system.ref_energies)) ** 2) / (np.var(system.ref_energies))
n_iter+=1
print("RMSD",n_iter, rmsd, a)
print("RMSD",n_iter, rmsd)
system.compute_conformations_weights(temperature=self._weighting_temperature, weighting_method=self._weighting_method, emm=system.get_energies_ensemble())
a = np.sum(system.weights*(system.get_energies_ensemble()-system.ref_energies-np.mean(system.get_energies_ensemble()-system.ref_energies)) **2) / (np.var(system.ref_energies))
print("FINAL",a)
return self._parameter_space.optimizable_parameters_values
def _add_regularization(self):
"""
Method that adds the regularization part of the A and B matrices.
Returns
-------
self._A, self._B
"""
# Create alpha=scaling_factor / scaling_constants
alpha = self._scaling_factor / self._scaling_constants
# TODO: think of how to make this division general
# Divide by two to make this approach equivalent to the remainder of ParaMol
# alpha = 0.5 * alpha
# Calculate prior widths
self._calculate_prior_widths()
# Calculate A_reg
A_reg = np.identity(self._n_parameters)
for row in range(A_reg.shape[0]):
A_reg[row, :] = (A_reg[row, :]) / self._prior_widths
A_reg = A_reg * alpha
# Update A matrix
self._A = np.vstack((self._A, A_reg))
# Calculate B_reg
#B_reg = np.zeros((n_parameters))
B_reg = alpha * self._initial_param_regularization
# Update B matrix
self._B = np.concatenate((self._B, B_reg))
print("Added regularization.")
return self._A, self._B
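    # Editor's note (hedged sketch, not part of ParaMol): appending rows to A and B as
    # above is the standard augmented-matrix form of a Tikhonov/L2 penalty. Solving the
    # enlarged system by least squares is equivalent to minimizing ||A0 p - B0||^2 plus a
    # per-parameter quadratic penalty whose strength and target are encoded in the extra
    # rows, e.g. (names below are illustrative only):
    #   A_aug = np.vstack((A0, np.diag(alpha_vec)))           # alpha_vec: per-parameter weights
    #   B_aug = np.concatenate((B0, alpha_vec * p_target))    # p_target: penalty targets
    #   p = np.linalg.lstsq(A_aug, B_aug, rcond=None)[0]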
def _add_symmetries(self, system):
"""
        Method that adds the symmetry part of the A and B matrices.
Returns
-------
self._A, self._B
"""
n_symmetries = 0
symm_covered = []
A_symm = []
for i in range(len(self._param_symmetries_list)):
symm_i = self._param_symmetries_list[i]
if symm_i in symm_covered or symm_i in ["X_x", "X_y", "X"]:
continue
for j in range(i + 1, len(self._param_symmetries_list)):
symm_j = self._param_symmetries_list[j]
if symm_i == symm_j:
A_symm_row = np.zeros((self._n_parameters))
A_symm_row[i] = 1.0
A_symm_row[j] = -1.0
A_symm.append(A_symm_row)
n_symmetries += 1
symm_covered.append(symm_i)
A_symm = np.asarray(A_symm)
# Update matrices
if n_symmetries > 0:
self._A = np.vstack((self._A, A_symm))
# Calculate B_reg
B_symm = np.zeros((n_symmetries))
# Update B matrix
self._B = np.concatenate((self._B, B_symm))
print("{} symmetries were found".format(n_symmetries))
return self._A, self._B
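    # Editor's note (hedged): each appended symmetry row carries a +1 and a -1 in the
    # columns of two symmetry-equivalent parameters with a 0 on the right-hand side, so
    # the least-squares solution is softly pulled towards p_i == p_j; the equality is a
    # penalty in the fit, not a hard constraint.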
def _calculate_prior_widths(self, method=None):
""""
Method that generates the prior_widths vector.
Parameters
----------
method : str, optional
Method used to generate the prior widths.
Returns
-------
self._prior_widths : np.array
Array containing the prior widths.
"""
self._prior_widths = []
prior_widths_dict, prior_widths = self._parameter_space.calculate_prior_widths(method=method)
for i in range(self._n_parameters):
self._prior_widths.append(prior_widths_dict[self._param_keys_list[i]])
self._prior_widths = np.asarray(self._prior_widths)
return self._prior_widths
def _calculate_scaling_constants(self, method=None):
"""
        Method that generates the scaling constants vector.
Parameters
----------
method : str, optional
Method used to generate the prior widths.
Returns
-------
        self._scaling_constants : np.array
Array containing the scaling constants.
"""
self._scaling_constants = []
scaling_constants_dict, scaling_constants = self._parameter_space.calculate_scaling_constants(method=method)
for i in range(self._n_parameters):
self._scaling_constants.append(scaling_constants_dict[self._param_keys_list[i]])
self._scaling_constants = np.asarray(self._scaling_constants)
return self._scaling_constants
def _reconstruct_parameters(self, final_parameters):
"""
Method that reconstructs the parameters after the LLS.
Parameters
----------
final_parameters : np.array or list
List containing the final parameters.
Returns
-------
"""
m = 0
for parameter in self._parameter_space.optimizable_parameters:
ff_term = parameter.ff_term
# ---------------------------------------------------------------- #
# Bonds #
# ---------------------------------------------------------------- #
if parameter.param_key == "bond_k":
if ff_term.parameters["bond_eq"].optimize:
k_xy = np.asarray(final_parameters[m:m + 2])
x0_xy = np.asarray(self._p0[m:m+2])
# Update value of "bond_k"
parameter.value = np.sum(k_xy)
# Update value of "bond_eq"
ff_term.parameters["bond_eq"].value = np.sum(k_xy*x0_xy) / np.sum(k_xy)
m += 2
else:
k_xy = final_parameters[m]
# Update value of "bond_k"
parameter.value = k_xy
m += 1
# ---------------------------------------------------------------- #
# Angles #
# ---------------------------------------------------------------- #
elif parameter.param_key == "angle_k":
if ff_term.parameters["angle_eq"].optimize:
k_xy = np.asarray(final_parameters[m:m + 2])
theta0_xy = np.asarray(self._p0[m:m+2])
                    # Update value of "angle_k"
parameter.value = np.sum(k_xy)
                    # Update value of "angle_eq"
ff_term.parameters["angle_eq"].value = np.sum(k_xy*theta0_xy) / np.sum(k_xy)
m += 2
else:
k_xy = final_parameters[m]
                    # Update value of "angle_k"
parameter.value = k_xy
m += 1
# ---------------------------------------------------------------- #
# Torsions #
# ---------------------------------------------------------------- #
elif parameter.param_key == "torsion_k":
if ff_term.parameters["torsion_phase"].optimize:
k_xy = np.asarray(final_parameters[m:m + 2])
delta_xy = np.asarray(self._p0[m:m + 2])
# Define phasors
p_x = k_xy[0]*np.exp(1j*delta_xy[0])
p_y = k_xy[1]* | np.exp(1j*delta_xy[1]) | numpy.exp |
import logging
from amlb.benchmark import TaskConfig
from amlb.data import Dataset
from amlb.datautils import impute
from amlb.results import save_predictions
from amlb.utils import Timer
from sklearn.preprocessing import OrdinalEncoder
import numpy as np
import pandas as pd
from frameworks.shared.callee import save_metadata
import torch
from pytorch_tabnet.tab_model import TabNetClassifier, TabNetRegressor
log = logging.getLogger(__name__)
def run(dataset:Dataset, config: TaskConfig):
log.info("****TabNet****")
save_metadata(config)
is_classification = config.type == 'classification'
X_train, X_test = dataset.train.X, dataset.test.X
X_train, X_test = impute(X_train, X_test)
X = | np.concatenate((X_train, X_test), axis=0) | numpy.concatenate |
import numpy as np
import matplotlib
#print ("Matplotlib Version :",matplotlib.__version__)
import pylab as pl
import time, sys, os
import decimal
import glob
from subprocess import call
from IPython.display import Image
from matplotlib.pyplot import figure, imshow, axis
from matplotlib.image import imread
from sympy import symbols, sympify, diff, sqrt, Integral, pi, lambdify  # required by minkowski_fourier_curve below
#from mpmath import quad
from scipy.integrate import quad
import random
import string
vol_frac = 0.5
radius_cyl = np.sqrt(vol_frac/np.pi)
rho = 1000
mu = 0.001
L = 2*radius_cyl
def Reynolds( V_mean, L, rho=1000, mu=0.001):
Re_actual = rho*V_mean*L/mu
return Re_actual
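# Hedged usage sketch added for illustration (the numbers are arbitrary, not from any study):
Re_example = Reynolds(V_mean=1e-3, L=2*radius_cyl)  # = 1000 * 1e-3 * 2*sqrt(0.5/pi) / 0.001 ~ 8.0e2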
def majorAxis(alpha):
return np.sqrt((0.5/np.pi)/alpha)
def createFolder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
def plot_fourier_curve(shape):
    # input (shape): dict with "coeffs", the Fourier coefficients of dimension (2, 2*M+1) where M is the maximum degree, and "name"
    # output: plots the shape, writes the sampled curve to a file named after the shape, and returns the sampled coordinates
coeffs = shape["coeffs"]
name =shape["name"]
x_coeffs = coeffs[0,:]
y_coeffs = coeffs[1,:]
M = (np.shape(coeffs)[1] -1 ) // 2
start_t = 0.0
t = np.linspace(start_t,start_t+2.0*np.pi,num=100,endpoint=True)
#print((t))
x = np.zeros(np.shape(t))
y = np.zeros(np.shape(t))
x += 0.5*x_coeffs[0] ; y += 0.5*y_coeffs[0]
for mi in range(1,M+1):
x += x_coeffs[2*mi-1]*np.cos(mi*t) + x_coeffs[2*mi]*np.sin(mi*t)
y += y_coeffs[2*mi-1]*np.cos(mi*t) + y_coeffs[2*mi]*np.sin(mi*t)
pl.plot(x,y,'k-')
head = "shape "+name
curve = np.column_stack((x,y))
np.savetxt(name,curve,delimiter=" ")#,header=head)
pl.axis('equal')
pl.title('Shape from Fourier Coeffs.')
pl.show()
coords = {"x":x,
"y":y}
return coords
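# Hedged example (editor's addition): with the coefficient layout used above, a circle of
# radius r is x(t) = r*cos(t), y(t) = r*sin(t), i.e. only coeffs[0,1] = r and coeffs[1,2] = r
# are non-zero (generateShape below seeds an ellipse the same way). For M = 4:
#   r = 0.3
#   circle = {"name": "circle",
#             "coeffs": np.array([[0., r, 0., 0., 0., 0., 0., 0., 0.],
#                                 [0., 0., r, 0., 0., 0., 0., 0., 0.]])}
#   plot_fourier_curve(circle)   # also writes the sampled curve to a file called "circle"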
def minkowski_fourier_curve(coeffs):
    # input (coeffs): the Fourier coefficients of dimension (2, 2*M+1), where M is the maximum degree
    # (the caller passes shape["coeffs"] directly).
# output (W) : Dictionary containing the four 2D minkowski tensors W020, W120, W220, W102 and the area
# and perimeter of the curve/shape.
#coeffs = shape["coeffs"]
t=symbols("t") # parameter of the curve
x_coeffs = coeffs[0,:]
y_coeffs = coeffs[1,:]
# m =0 , zeroth degree terms, also gives the centroid of the shape.
expr_X = "0.5*"+str(coeffs[0,0])
expr_Y = "0.5*"+str(coeffs[1,0])
M = (np.shape(coeffs)[1] -1)//2
# X and Y coodinates as parametric representation using fourier series.
for mi in range(1,M+1):
expr_X += "+" + str(x_coeffs[2*mi-1]) + "*cos("+str(mi)+"*t) + " +str(x_coeffs[2*mi])+"*sin("+str(mi)+"*t)"
expr_Y += "+" + str(y_coeffs[2*mi-1]) + "*cos("+str(mi)+"*t) + " +str(y_coeffs[2*mi])+"*sin("+str(mi)+"*t)"
# derivative terms required for normal and curvature computation
sym_x = sympify(expr_X)
sym_y = sympify(expr_Y)
# dx/dt
sym_dx = diff(sym_x,t)
# d^2x/dt^2
sym_ddx = diff(sym_dx,t)
# dA = ydx infinitesimal area
sym_ydx = sym_y*sym_dx
sym_dy = diff(sym_y,t)
sym_ddy = diff(sym_dy,t)
# ds = sqrt(x'^2 + y'^2) , the infinitesimal arc-length
sym_ds = sqrt(sym_dx**2 + sym_dy**2)
# position vector r
sym_r = [sym_x, sym_y]
# unit normal vector n
sym_norm_mag = sqrt(sym_dx**2 + sym_dy**2)
sym_norm = [sym_dx/sym_norm_mag, sym_dy/sym_norm_mag]
#print("Computed derivatives")
# Area = \int ydx
area = Integral(sym_ydx,(t,0,2*pi)).evalf(5)
perimeter = Integral(sym_ds,(t,0,2*pi)).evalf(5)
kappa = (sym_dx*sym_ddy - sym_dy*sym_ddx)/(sym_dx**2 + sym_dy**2)**(3/2)
#print("Computing integrals ...")
#Initialize the minkowski tensors
W020 = np.zeros((2,2))
W120 = np.zeros((2,2))
W220 = np.zeros((2,2))
W102 = np.zeros((2,2))
x = symbols('x')
#tensor computation
for ia in range(2):
for ib in range(2):
# W020[ia,ib] = Integral(sym_r[ia]*sym_r[ib]*sym_ydx, (t,0,2*pi)).evalf(5)
# print("Computing W120 ...")
# W120[ia,ib] = 0.5* Integral(sym_r[ia]*sym_r[ib]*sym_ds, (t,0,2*pi)).evalf(5)
# W220[ia,ib] = 0.5* Integral(kappa*sym_r[ia]*sym_r[ib]*sym_ds, (t,0,2*pi)).evalf(5)
# print("Computing W102 ...")
# W102[ia,ib] = 0.5* Integral(sym_norm[ia] * sym_norm[ib]*sym_ds,(t,0,2*pi)).evalf(5)
f = lambdify(t,sym_r[ia]*sym_r[ib]*sym_ydx)
W020[ia,ib],err = quad( f, 0,2*np.pi)
#print(W020[ia,ib])
#print("Computing W120 ...")
f = lambdify(t,sym_r[ia]*sym_r[ib]*sym_ds)
W120[ia,ib],err = quad(f, 0,2*np.pi)
W120[ia,ib] = 0.5* W120[ia,ib]
f = lambdify(t,kappa*sym_r[ia]*sym_r[ib]*sym_ds)
W220[ia,ib],err = quad(f, 0,2*np.pi)
W220[ia,ib] = 0.5*W220[ia,ib]
#print("Computing W102 ...")
f = lambdify(t,sym_norm[ia] * sym_norm[ib]*sym_ds)
W102[ia,ib], err = quad(f, 0,2*np.pi)
W102[ia,ib] = 0.5* W102[ia,ib]
#dictionary with computed quantities
W={"W020":W020,
"W120":W120,
"W220":W220,
"W102":W102,
"area":area,
"perimeter":perimeter
}
return W
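# Editor's sanity-check note (hedged): for a circle of radius R centred at the origin the
# quantities above reduce to |area| = pi*R^2, perimeter = 2*pi*R, and diagonal tensors
# W120 = (pi*R^3/2)*I, W220 = (pi*R^2/2)*I, W102 = (pi*R/2)*I with vanishing off-diagonal
# entries, while W020 has magnitude (pi*R^4/4)*I with a sign set by the orientation of the
# y*dx measure. Note also that sym_norm as written is the unit tangent (x', y')/|r'|; the
# outward normal would be (y', -x')/|r'| -- for a circle both choices give the same W102.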
# def simulate_flow():
# DIR = './shapes/coords'
# createFolder('./simulations')
# start_t = time.time()
# name_list = []
# num = 0
# #n_angles =20
# n_shapes = len(os.listdir(DIR))
# for name in os.listdir(DIR):
# if os.path.isfile(os.path.join(DIR,name)):
# update_progress(num/n_shapes,start_t,time.time())
# num += 1
# thisfolder ='./simulations/'+name
# createFolder(thisfolder)
# #print("Shape No. "+str(num)+" : "+name)
# #for angle in range(n_angles):
# #theta = random.uniform(0.0,np.pi)
# # thisfolder ='./simulations/'+name+'/theta_'+str(round(theta,3))
# # createFolder(thisfolder)
# call(["cp","vorticity.gfs",thisfolder+'/.'])
# call(["cp","xprofile",thisfolder+'/.'])
# f=open(thisfolder+"/shape.gts","w")
# call(["shapes",os.path.join(DIR,name)],stdout=f) #+" > "+thisfolder+"/shape.gts"])
# os.chdir(thisfolder)
# call(["gerris2D","vorticity.gfs"])
# #xp = (np.loadtxt('xprof', delimiter=" "))
# #pl.plot(xp[:,6],xp[:,2],label=r'$\theta =$') #thets
# #Vel_mean[i,1] = np.mean(xp[:,6])
# #Vel_mean[i,0] = theta
# #Image("velocity.png")
# os.chdir('../../')
# #name_list.append(name)
# n_simulations = n_shapes
def simulate_flow(dp=0.000001,DIR='./shapes_low0/coords'):
# DIR = './shapes_low0/coords'
# dp_0 = 0.000001
# p_ratio = round(dp_0/dp,2)
dp_string = '{:.0e}'.format(decimal.Decimal(str(dp)))
folder_name ='./simulations_dP_'+dp_string
input_file ='vorticity_'+dp_string+'.gfs'
with open('vorticity.gfs','r') as fin:
# # with is like your try .. finally block in this case
input_string = fin.readlines()
for index, line in enumerate(input_string):
if line.strip().startswith('Source {} U'):
            input_string[index] = 'Source {} U ' + str(dp) + '\n'  # keep the trailing newline for writelines
with open(input_file, 'w') as file:
file.writelines( input_string )
createFolder(folder_name)
start_t = time.time()
name_list = []
num = 0
#n_angles =20
n_shapes = len(os.listdir(DIR))
for name in os.listdir(DIR):
if os.path.isfile(os.path.join(DIR,name)):
update_progress(num/n_shapes,start_t,time.time())
num += 1
thisfolder =folder_name + '/' + name
createFolder(thisfolder)
#print("Shape No. "+str(num)+" : "+name)
#for angle in range(n_angles):
#theta = random.uniform(0.0,np.pi)
# thisfolder ='./simulations/'+name+'/theta_'+str(round(theta,3))
# createFolder(thisfolder)
call(["cp", input_file ,thisfolder+'/.'])
call(["cp","xprofile",thisfolder+'/.'])
f=open(thisfolder+"/shape.gts","w")
call(["shapes",os.path.join(DIR,name)],stdout=f) #+" > "+thisfolder+"/shape.gts"])
os.chdir(thisfolder)
call(["gerris2D",input_file])
#xp = (np.loadtxt('xprof', delimiter=" "))
#pl.plot(xp[:,6],xp[:,2],label=r'$\theta =$') #thets
#Vel_mean[i,1] = np.mean(xp[:,6])
#Vel_mean[i,0] = theta
#Image("velocity.png")
os.chdir('../../')
#name_list.append(name)
n_simulations = n_shapes
def fourier2Cart(coeffs,t):
#x_coeffs = coeffs[0,:]
#y_coeffs = coeffs[1,:]
#M = (np.shape(coeffs)[1] -1 ) // 2
#x = np.zeros(np.shape(t))
#y = np.zeros(np.shape(t))
#x += 0.5*x_coeffs[0] ; y += 0.5*y_coeffs[0]
#for mi in range(1,M+1):
# x += x_coeffs[2*mi-1]*np.cos(mi*t) + x_coeffs[2*mi]*np.sin(mi*t)
# y += y_coeffs[2*mi-1]*np.cos(mi*t) + y_coeffs[2*mi]*np.sin(mi*t)
#t.reshape(len(t))
#t=t[:,np.newaxis].T
tt = np.row_stack((t,t))
#print(np.shape(tt))
coords = np.zeros(np.shape(tt))
coords += 0.5*coeffs[:,0,np.newaxis]
M = (np.shape(coeffs)[1] -1 ) // 2
for mi in range(1,M+1):
coords += coeffs[:,2*mi-1,np.newaxis]*np.cos( mi*tt) + coeffs[:,2*mi,np.newaxis]*np.sin(mi*tt)
#coords = np.row_stack((x,y))
return coords
def generateShape(res=200,M=4):
t = np.linspace(0, 2.0*np.pi, num=res, endpoint=True)
dt = t[1]-t[0]
coeffs = np.zeros((2,2*M+1))
bad_shape = True
n_attempts = 0
while bad_shape == True:
alpha = np.random.uniform(1.0,2.0)
a = majorAxis(alpha)
b = alpha*a
#a = 1
#b = 1
#print("the major and minor axes are:"+str(a)+","+str(b))
coeffs[0,1] = a # create an ellipse as starting point
coeffs[1,2] = b # create an ellipse as starting point
coeffs[:,3::] = coeffs[:,3::] + 0.25*a*(np.random.rand(2,2*M-2) -0.5)#-0.5
coords = fourier2Cart(coeffs,t)
#pl.plot(coords[0,:],coords[1,:],'-')
dx = np.gradient(coords,axis=1)
ddx = np.gradient(dx, axis=1)
num = dx[0,:] * ddx[1,:] - ddx[0,:] * dx[1,:]
denom = dx[0,:] * dx[0,:] + dx[1,:] * dx[1,:]
denom = np.sqrt(denom)
denom = denom * denom * denom
curvature = num / denom
sharp_edge = False
outside_domain = False
if (np.amax(np.absolute(curvature)) > 20):
sharp_edge = True
coords_prime = np.gradient(coords,dt,axis=1)
integrand = coords_prime[1,:] * coords[0,:]
area = np.trapz(integrand, x=t)
scale = np.sqrt(0.5 / np.absolute(area))
coeffs = scale * coeffs
coords = fourier2Cart(coeffs,t)
if(np.any(np.abs(coords) >= 0.5)):
outside_domain = True
bad_shape = sharp_edge or outside_domain
n_attempts +=1
#if(bad_shape):
# print( "This shape is bad:"+str(sharp_edge)+str(outside_domain))
#x_coeffs_prime = x_coeffs[1:]
#y_coeffs_prime = y_coeffs[1:]
coords_prime = np.gradient(coords,dt,axis=1)
integrand = coords[1,:] * coords_prime[0,:]
area = np.trapz(integrand, x=t)
# x = np.append(x, x[0])
# y = np.append(y, y[0])
length = np.sum( np.sqrt(np.ediff1d(coords[0,:]) * np.ediff1d(coords[0,:]) + np.ediff1d(coords[1,:]) * np.ediff1d(coords[1,:])) )
print('x-coefficients: ' + str(coeffs[0,:]))
print('y-coefficients: ' + str(coeffs[1,:]))
print('enclosed area: ' + str(np.absolute(area)))
print('curve length: ' + str(length))
shape={"coeffs":coeffs,
"coords":coords}
pl.plot(coords[0,:],coords[1,:],'-')
return shape
def check_self_intersection(coords):
result = False
for i in range(2,np.shape(coords)[1]-1):
p = coords[:,i]
dp = coords[:,i+1] - p
for j in range(0,i-2):
if (result==False):
q = coords[:,j]
dq = coords[:,j+1] - q
dpdq = np.cross(dp,dq)
t = np.cross(q-p,dq)/dpdq
u = np.cross(q-p,dp)/dpdq
if(dpdq != 0):
if(0<= t <= 1):
if(0<= u <= 1):
result = True
return result
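# Editor's note (hedged): this is the standard parametric 2D segment-intersection test.
# Writing the segments as p + t*dp and q + u*dq and crossing both sides of
# p + t*dp = q + u*dq with dq and with dp gives
#   t = cross(q - p, dq) / cross(dp, dq),   u = cross(q - p, dp) / cross(dp, dq),
# and the segments intersect when cross(dp, dq) != 0 and both t and u lie in [0, 1].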
def check_domain_intersection(coords):
result = np.any(np.abs(coords)>= 0.5)
return result
def generate_Npoint_shape(N=10,M=4, res=100):
#random.seed(1516)
bad_shape =True
while bad_shape == True:
pos_r = np.random.uniform(0,0.5,(N))
#pos_thet = np.random.uniform(0,2*np.pi,(1,N))
pos_thet =np.linspace(0,2*np.pi,num=N,endpoint=False)
posx = pos_r*np.cos(pos_thet)
posy = pos_r*np.sin(pos_thet)
pos = np.row_stack((posx,posy))
center = np.mean(pos,axis=1)
r = pos - center[:,np.newaxis]
r_mag = np.sqrt(r[0,:]**2 + r[1,:]**2)
x = np.zeros((2,np.shape(r)[1]))
x[0,:] = 1
costh =np.diag(np.matmul(r.T,x))#r.x
costh = costh/r_mag
theta = np.arccos(costh)
ry = r[1,:]
rx = r[0,:]
neg = np.where(ry<0)
theta[neg] = 2*np.pi - theta[neg]
#print(r)
#print(theta)
rx = rx[np.argsort(theta)]
ry = ry[np.argsort(theta)]
theta = theta[np.argsort(theta)]
#print(rx,ry)
#print(theta)
b = np.append(rx,ry)
#print(np.shape(b))
#M = 4
m = 2*M+1
A = np.zeros((N,m))
A[:,0] = 1.0
for j in range(1,M+1):
A[:,2*j-1] = np.cos(j*theta)
A[:,2*j] = np.sin(j*theta)
# Use the same A for both x and y coordinates.
AA = np.matmul(A.T,A)
#print("solving")
#print(np.shape(AA))
#print(np.shape(rx))
coeffs_x = np.linalg.solve(AA,np.matmul(A.T,rx))
coeffs_y = np.linalg.solve(AA,np.matmul(A.T,ry))
coeffs = np.row_stack((coeffs_x,coeffs_y))
#oeffs = scale_area(coeffs)
#coeffs[:,2*mi-1,np.newaxis]*np.cos( mi*tt) + coeffs[:,2*mi,np.newaxis]*np.sin(mi*tt)
#np.cos(M*theta)
t = np.linspace(0, 2.0*np.pi, num=res, endpoint=True)
dt = t[1]-t[0]
coords = fourier2Cart(coeffs,t)
coords_prime = np.gradient(coords,dt,axis=1)
integrand = coords_prime[1,:] * coords[0,:]
area = np.trapz(integrand, x=t)
self_intersection = check_self_intersection(coords)
scale = np.sqrt(0.5 / np.absolute(area))
coeffs = scale * coeffs
coords = fourier2Cart(coeffs,t)
domain_intersection = check_domain_intersection(coords)
#bad_shape = False
bad_shape = self_intersection or domain_intersection
# pl.figure(figsize=(8,4))
# pl.subplot(121,projection='polar')
# pl.plot(pos_thet,pos_r,'o')
# pl.grid(True)
# pl.subplot(122)
# pl.axis('equal')
# pl.xlim(-0.5,0.5)
# pl.ylim(-0.5,0.5)
# pl.plot(r[0,:],r[1,:],'o')
# pl.plot(coords[0,:],coords[1,:],'r-')
shape={"coeffs":coeffs,
"coords":coords}
#pl.plot(coords[0,:],coords[1,:],'-')
return shape
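# Editor's note (hedged): the fit above builds a design matrix with columns
# [1, cos(theta), sin(theta), ..., cos(M*theta), sin(M*theta)] and solves the normal
# equations (A^T A) c = A^T r separately for the x and y coefficients;
# np.linalg.lstsq(A, rx, rcond=None)[0] would return the same coefficients without
# forming A^T A explicitly. The constant column is 1.0 while fourier2Cart applies the
# conventional 0.5 factor to coeffs[:,0]; since the sample points are centred the DC
# term is ~0, so this has little practical effect here.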
def plot_shapes(shapes):
nr = len(shapes)//5
width = 10
height = nr*width/5
pl.figure(figsize=(width,height))
for i in range(len(shapes)):
showImageinArray(i,len(shapes),shapes[i]["coords"])
def showImageinArray(i,N,coords):
#fig = figure(figsize=(6,6))
#number_of_files = len(list_of_files)
#print(number_of_files)
im_per_row = 5
numrows = N // im_per_row
#remaining = i % im_per_row
#for i in range(numrows+1):
# for j in range(im_per_row):
# k = i*im_per_row + j
# if (k<number_of_files):
pl.subplot(numrows+1,im_per_row,i+1)
pl.plot(coords[:,0],coords[:,1],'r-')
pl.plot(axis='equal')
pl.subplots_adjust(bottom=0.0)
pl.axis('off')
#pl.show()
def update_progress(progress, start, now):
barLength = 15 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
if(progress == 0 ):
time_left = -1
else:
time_left = (now-start)/progress - (now-start)
text = "\rPercent: [{0}] {1}% {2} {3} min".format( "#"*block + "-"*(barLength-block), round(progress*100,2), status,round(time_left/60,2))
sys.stdout.write(text)
sys.stdout.flush()
def write_shape(shape):
#N = len(shapes)
#random.seed(1516)
createFolder('./shapes')
coeff_folder = "./shapes/"+"coeffs/"
coord_folder = "./shapes/"+"coords/"
createFolder(coeff_folder)
createFolder(coord_folder)
#for i in range(len(shapes)):
name = id_generator()
#shape["name"] = name
coord_file = coord_folder+name
coeff_file = coeff_folder+name
np.savetxt(coord_file,shape["coords"].T,delimiter=' ')
np.savetxt(coeff_file,shape["coeffs"].T,delimiter=' ')
return name
def id_generator(size=6, chars=string.ascii_uppercase+string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def read_shapes():
DIR = './shapes/coeffs'
#createFolder('./simulations')
shapes=[]
num = 0
#n_angles =20
for name in os.listdir(DIR):
if os.path.isfile(os.path.join(DIR,name)):
coeffs = np.loadtxt(name,delimiter=' ')
coeffs = coeffs.T
shape= {"coeffs": coeffs}
shapes.append(shape)
return shapes
def compute_minkowski_tensors():
DIR = './shapes/coeffs'
mt_folder = './shapes/MT'
createFolder(mt_folder)
start_t = time.time()
name_list = []
num = 0
n_shapes = len(os.listdir(DIR))
for name in os.listdir(DIR):
if os.path.isfile(os.path.join(DIR,name)):
update_progress(num/n_shapes,start_t, time.time())
num += 1
coeffs = np.loadtxt(os.path.join(DIR,name),delimiter=' ')
coeffs = coeffs.T
W = minkowski_fourier_curve(coeffs)
W_write = np.row_stack((W["W020"],W["W120"],W["W220"],W["W102"]))
mt_file = os.path.join(mt_folder,name)
np.savetxt(mt_file,W_write,delimiter=' ')
def consolidate_coords():
DIR = './shapes/coords'
createFolder('./shapes/coords_consolidated')
n_shapes= len(os.listdir(DIR))
allnames = os.listdir(DIR)
allnames.sort()
Xcoord =np.zeros((n_shapes,100))
Ycoord =np.zeros((n_shapes,100))
length =np.zeros((n_shapes,1))
for counter,name in enumerate(allnames):
localfile = os.path.join(DIR,name)
arr = np.loadtxt(localfile)
npoints = np.shape(arr)[0]
# find total length
len_ = 0.0
for j in range(npoints-1):
len_ = len_ + np.sqrt((arr[j+1,0]-arr[j,0])**2 + (arr[j+1,1]-arr[j,1] )**2 )
if (npoints == 200):
arr_X=arr[::2,0]
arr_Y=arr[::2,1]
elif (npoints == 300):
arr_X=arr[::3,0]
arr_Y=arr[::3,1]
else :
arr_X = arr[:,0]
arr_Y = arr[:,1]
assert np.shape(arr_X)[0]==100 , " Array x problem"
assert np.shape(arr_Y)[0]==100 , " Array y problem"
Xcoord[counter,:] = arr_X
Ycoord[counter,:] = arr_Y
length[counter,0] = len_
np.savetxt('./shapes/coords_consolidated/Xcoord',Xcoord,delimiter=' ')
| np.savetxt('./shapes/coords_consolidated/Ycoord',Ycoord,delimiter=' ') | numpy.savetxt |
import copy
from typing import Iterable
import numba as nb
import numpy as np
import spectrum_utils.spectrum as sus
def dot(spectrum1: sus.MsmsSpectrum, spectrum2: sus.MsmsSpectrum,
fragment_mz_tolerance: float) -> float:
"""
Compute the dot product between the given spectra.
Parameters
----------
spectrum1 : sus.MsmsSpectrum
The first spectrum.
spectrum2 : sus.MsmsSpectrum
The second spectrum.
fragment_mz_tolerance : float
The fragment m/z tolerance used to match peaks.
Returns
-------
float
The dot product similarity between the given spectra.
"""
return _dot(spectrum1.mz, _norm_intensity( | np.copy(spectrum1.intensity) | numpy.copy |
'''
Methods to convert data between physical (cMpc) coordinates
and observational (angular-frequency) coordinates.
'''
import numpy as np
from .lightcone import redshifts_at_equal_comoving_distance
from . import cosmology as cm
from . import conv
from . import helper_functions as hf
from . import smoothing
from . import const
from scipy.signal import fftconvolve
def physical_lightcone_to_observational(physical_lightcone, input_z_low, output_dnu, output_dtheta, input_box_size_mpc=None):
'''
Interpolate a lightcone volume from physical (length) units
to observational (angle/frequency) units.
Parameters:
physical_lightcone (ndarray): the lightcone volume
input_z_low (float): the lowest redshift of the input lightcone
output_dnu (float): the frequency resolution of the output volume in MHz
output_dtheta (float): the angular resolution of the output in arcmin
input_box_size_mpc (float): the size of the input FoV in Mpc.
If None (default), this will be set to conv.LB
Returns:
* The output volume as a numpy array
* The output frequencies in MHz as an array of floats
'''
if input_box_size_mpc == None:
input_box_size_mpc = conv.LB
#For each output redshift: average the corresponding slices
hf.print_msg('Making observational lightcone...')
hf.print_msg('Binning in frequency...')
lightcone_freq, output_freqs = bin_lightcone_in_frequency(physical_lightcone,\
input_z_low, input_box_size_mpc, output_dnu)
#Calculate the FoV in degrees at lowest z (largest one)
fov_deg = cm.angular_size_comoving(input_box_size_mpc, input_z_low)
#Calculate dimensions of output volume
n_cells_theta = int(fov_deg*60./output_dtheta)
n_cells_nu = len(output_freqs)
#Go through each slice and make angular slices for each one
hf.print_msg('Binning in angle...')
output_volume = np.zeros((n_cells_theta, n_cells_theta, n_cells_nu))
for i in range(n_cells_nu):
if i%10 == 0:
hf.print_msg('Slice %d of %d' % (i, n_cells_nu))
z = cm.nu_to_z(output_freqs[i])
output_volume[:,:,i] = physical_slice_to_angular(lightcone_freq[:,:,i], z, \
slice_size_mpc=input_box_size_mpc, fov_deg=fov_deg,\
dtheta=output_dtheta, order=2)
return output_volume, output_freqs
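# Hedged usage sketch (editor's addition; array shapes and parameter values are illustrative):
#   lightcone = np.random.rand(250, 250, 800)            # (x, y, LoS) volume in cMpc cells
#   vol, freqs = physical_lightcone_to_observational(lightcone, input_z_low=7.0,
#                                                    output_dnu=0.2,     # MHz
#                                                    output_dtheta=3.0,  # arcmin
#                                                    input_box_size_mpc=500.)
#   # vol has shape (n_theta, n_theta, len(freqs)); freqs decrease along the line of sight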
def observational_lightcone_to_physical(observational_lightcone, input_freqs, input_dtheta):
'''
Interpolate a lightcone volume measured in observational (angle/frequency)
units into physical (length) units. The output resolution will be set
    to the coarsest one, as determined either by the angular or the frequency
resolution. The lightcone must have the LoS as the last index, with
frequencies decreasing along the LoS.
Parameters:
observational_lightcone (numpy array): the input lightcone volume
input_freqs (numpy array): the frequency in MHz of each slice along the
line of sight of the input
input_dheta (float): the angular size of a cell in arcmin
Returns:
* The output volume
* The redshifts along the LoS of the output
* The output cell size in Mpc
'''
assert input_freqs[0] > input_freqs[-1]
assert observational_lightcone.shape[0] == observational_lightcone.shape[1]
#Determine new cell size - set either by frequency or angle.
#The FoV size in Mpc is set by the lowest redshift
dnu = input_freqs[0]-input_freqs[1]
z_low = cm.nu_to_z(input_freqs[0])
fov_deg = observational_lightcone.shape[0]*input_dtheta/60.
fov_mpc = fov_deg/cm.angular_size_comoving(1., z_low)
cell_size_perp = fov_mpc/observational_lightcone.shape[0]
cell_size_par = cm.nu_to_cdist(input_freqs[-1])-cm.nu_to_cdist(input_freqs[-2])
output_cell_size = max([cell_size_par, cell_size_perp])
hf.print_msg('Making physical lightcone with cell size %.2f Mpc' % output_cell_size)
#Go through each slice along frequency axis. Cut off excess and
#interpolate down to correct resolution
n_cells_perp = int(fov_mpc/output_cell_size)
output_volume_par = np.zeros((n_cells_perp, n_cells_perp, observational_lightcone.shape[2]))
for i in range(output_volume_par.shape[2]):
z = cm.nu_to_z(input_freqs[i])
output_volume_par[:,:,i] = angular_slice_to_physical(observational_lightcone[:,:,i],\
z, slice_size_deg=fov_deg, output_cell_size=output_cell_size,\
output_size_mpc=fov_mpc, order=2)
#Bin along frequency axis
output_volume, output_redshifts = bin_lightcone_in_mpc(output_volume_par, \
input_freqs, output_cell_size)
return output_volume, output_redshifts, output_cell_size
def physical_slice_to_angular(input_slice, z, slice_size_mpc, fov_deg, dtheta, order=0):
'''
Interpolate a slice in physical coordinates to angular coordinates.
Parameters:
input_slice (numpy array): the 2D slice in physical coordinates
z (float): the redshift of the input slice
slice_size_Mpc (float): the size of the input slice in cMpc
fov_deg (float): the field-of-view in degrees. The output will be
padded to match this size
dtheta (float): the target resolution in arcmin
Returns:
        The angular slice, padded to the requested field of view
'''
#Resample
fov_mpc = cm.deg_to_cdist(fov_deg, z)
cell_size_mpc = fov_mpc/(fov_deg*60./dtheta)
n_cells_resampled = int(slice_size_mpc/cell_size_mpc)
#Avoid edge effects with even number of cells
if n_cells_resampled % 2 == 0:
n_cells_resampled -= 1
resampled_slice = resample_slice(input_slice, n_cells_resampled, order)
#Pad the array
slice_n = resampled_slice.shape[0]
padded_n = int(fov_deg*60./dtheta)# np.round(slice_n*(fov_mpc/slice_size_mpc))
if padded_n < slice_n:
if slice_n - padded_n > 2:
print('Warning! Padded slice is significantly smaller than original!')
print('This should not happen...')
padded_n = slice_n
padded_slice = _get_padded_slice(resampled_slice, padded_n)
return padded_slice
def angular_slice_to_physical(input_slice, z, slice_size_deg, output_cell_size, output_size_mpc, order=0, prefilter=True):
'''
Interpolate a slice in angular coordinates to physical
Parameters:
input_slice (numpy array): the 2D slice in observational coordinates
z (float): the redshift of the input slice
slice_size_deg (float): the size of the input slice in deg
output_cell_size (float): the output cell size in cMpc
output_size_mpc (float): the output size in mpc
Returns:
        The physical slice, cut to the requested output size in Mpc
'''
#Resample
slice_size_mpc = cm.deg_to_cdist(slice_size_deg, z)
n_cells_resampled = int(slice_size_mpc/output_cell_size)
#Avoid edge effects with even number of cells
if n_cells_resampled % 2 == 0:
n_cells_resampled += 1
resampled_slice = resample_slice(input_slice, n_cells_resampled, order, prefilter)
#Remove cells to get correct size
n_cutout_cells = int(output_size_mpc/output_cell_size)# np.round(resampled_slice.shape[0]*output_size_mpc/slice_size_mpc)
if n_cutout_cells > input_slice.shape[0]:
        if n_cutout_cells - input_slice.shape[0] > 2:
print('Warning! Cutout slice is larger than original.')
print('This should not happen')
n_cutout_cells = input_slice.shape[0]
slice_cutout = resampled_slice[:n_cutout_cells, :n_cutout_cells]
return slice_cutout
def resample_slice(input_slice, n_output_cells, order=0, prefilter=True):
'''
Resample a 2D slice to new dimensions.
Parameters:
input_slice (ndarray): the input slice
n_output_cells (int) : the number of output cells
Returns:
output slice
'''
tophat_width = np.round(input_slice.shape[0]/n_output_cells)
if tophat_width < 1 or (not prefilter):
tophat_width = 1
slice_smoothed = smoothing.smooth_tophat(input_slice, tophat_width)
idx = np.linspace(0, slice_smoothed.shape[0], n_output_cells)
output_slice = smoothing.interpolate2d(slice_smoothed, idx, idx, order=order)
return output_slice
def bin_lightcone_in_frequency(lightcone, z_low, box_size_mpc, dnu):
'''
Bin a lightcone in frequency bins.
Parameters:
lightcone (ndarray): the lightcone in length units
z_low (float): the lowest redshift of the lightcone
box_size_mpc (float): the side of the lightcone in Mpc
dnu (float): the width of the frequency bins in MHz
Returns:
* The lightcone, binned in frequencies with high frequencies first
* The frequencies along the line of sight in MHz
'''
#Figure out dimensions and make output volume
cell_size = box_size_mpc/lightcone.shape[0]
distances = cm.z_to_cdist(z_low) + np.arange(lightcone.shape[2])*cell_size
input_redshifts = cm.cdist_to_z(distances)
input_frequencies = cm.z_to_nu(input_redshifts)
nu1 = input_frequencies[0]
nu2 = input_frequencies[-1]
output_frequencies = np.arange(nu1, nu2, -dnu)
output_lightcone = np.zeros((lightcone.shape[0], lightcone.shape[1], \
len(output_frequencies)))
#Bin in frequencies by smoothing and indexing
max_cell_size = cm.nu_to_cdist(output_frequencies[-1])-cm.nu_to_cdist(output_frequencies[-2])
smooth_scale = np.round(max_cell_size/cell_size)
if smooth_scale < 1:
smooth_scale = 1
hf.print_msg('Smooth along LoS with scale %f' % smooth_scale)
tophat3d = np.ones((1,1,int(smooth_scale)))
tophat3d /= | np.sum(tophat3d) | numpy.sum |
import numpy as np
import plotly as py
import plotly.graph_objs as go
import random
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import datetime as dt
from typing import Dict
def plotly_wordcloud(words: Dict[str, int]):
lower, upper = 15, 45
frequency = [((x - min(words.values())) / (max(words.values()) - min(words.values()))) * (upper - lower) + lower
for x in words.values()]
if np.isnan( | np.sum(frequency) | numpy.sum |
#This weeks code focuses on understanding basic functions of pandas and numpy
#This will help you complete other lab experiments
# Do not change the function definations or the parameters
import numpy as np
import pandas as pd
#input: tuple (x,y) x,y:int
def create_numpy_ones_array(shape):
#return a numpy array with one at all index
array=None
#TODO
array = np.ones(shape, dtype = np.int8)
return array
#input: tuple (x,y) x,y:int
def create_numpy_zeros_array(shape):
#return a numpy array with zeros at all index
array=None
#TODO
array = np.zeros(shape, dtype = np.int8)
return array
#input: int
def create_identity_numpy_array(order):
#return a identity numpy array of the defined order
array=None
#TODO
array = np.identity(order, dtype = np.int8)
return array
#input: numpy array
def matrix_cofactor(matrix):
#return cofactor matrix of the given array
array=None
#TODO
newMatrix = []
try:
array = np.linalg.inv(matrix).T * np.linalg.det(matrix)
except:
for i in range(len(matrix)):
temp = []
for j in range(len(matrix[i])):
minor = matrix[np.array(list(range(i))+list(range(i+1,matrix.shape[0])))[:,np.newaxis],np.array(list(range(j))+list(range(j+1,matrix.shape[1])))]
                temp.append(((-1) ** (i + j)) * np.linalg.det(minor))  # cofactor = signed minor
newMatrix.append(temp)
array = np.array(newMatrix)
return array
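# Editor's worked example (hedged): the cofactor is the signed minor,
# C[i, j] = (-1)**(i + j) * det(minor(i, j)), and for an invertible matrix it equals
# det(A) * inv(A).T, which is what the try-branch computes. For A = [[1, 2], [3, 4]]
# (det = -2) the cofactor matrix is [[4, -3], [-2, 1]].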
#Input: (numpy array, int ,numpy array, int , int , int , int , tuple,tuple)
#tuple (x,y) x,y:int
def f1(X1,coef1,X2,coef2,seed1,seed2,seed3,shape1,shape2):
    #note: shape is of the form (x1,x2)
#return W1 x (X1 ** coef1) + W2 x (X2 ** coef2) +b
# where W1 is random matrix of shape shape1 with seed1
# where W2 is random matrix of shape shape2 with seed2
    # where b is a random matrix of compatible shape with seed3
# if dimension mismatch occur return -1
ans=None
#TODO
try:
np.random.seed(seed1)
W1 = np.random.rand(shape1[0], shape1[1])
np.random.seed(seed2)
W2 = np.random.rand(shape2[0], shape2[1])
ans = np.add(np.matmul(W1,(X1 ** coef1)), np.matmul(W2, X2 ** coef2))
shape = np.shape(ans)
np.random.seed(seed3)
b = | np.random.rand(shape[0], shape[1]) | numpy.random.rand |
import numpy as np
from keras import backend as K
import cv2
import os
import shutil
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing.image import img_to_array
from keras.optimizers import SGD
def hard_sigmoid(x):
i = 0
y = np.zeros((1, len(x[0,:])))
for x_i in x[0,:]:
if x_i < -2.5:
y_i = 0
elif x_i >2.5:
y_i = 1
else:
y_i = 0.2*x_i+0.5
y[0,i] = y_i
i = i+1
return y
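def hard_sigmoid_vectorized(x):
    # Editor's hedged sketch (not part of the original utilities): the piecewise rule in
    # hard_sigmoid above is just 0.2*x + 0.5 clipped to [0, 1], so it can be evaluated
    # elementwise on any array in one call (the original expects a (1, n) row vector).
    return np.clip(0.2 * np.asarray(x) + 0.5, 0.0, 1.0)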
def lp_norm(p,n1,n2):
n1 = np.array([n1]).ravel()
n2 = np.array([n2]).ravel()
m = np.count_nonzero(n1-n2)
return np.linalg.norm(n1-n2,ord=p)/float(m)
def l2_norm(n1,n2):
n1 = np.array([n1]).ravel()
n2 = np.array([n2]).ravel()
m = np.count_nonzero(n1-n2)
return np.linalg.norm(n1-n2,ord=2)/float(m)
def getActivationValue(model,layer,test):
#print("xxxx %s"%(str(self.model.layers[1].input.shape)))
OutFunc = K.function([model.input], [model.layers[layer].output])
out_val = OutFunc([test, 1.])[0]
return np.squeeze(out_val)
def layerName(model,layer):
layerNames = [layer.name for layer in model.layers]
return layerNames[layer]
def powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def extract_vgg16_features(model, video_input_file_path, feature_output_file_path):
if os.path.exists(feature_output_file_path):
return np.load(feature_output_file_path)
count = 0
print('Extracting frames from video: ', video_input_file_path)
vidcap = cv2.VideoCapture(video_input_file_path)
success, image = vidcap.read()
features = []
success = True
while success:
vidcap.set(cv2.CAP_PROP_POS_MSEC, (count * 1000)) # added this line
success, image = vidcap.read()
# print('Read a new frame: ', success)
if success:
img = cv2.resize(image, (224, 224), interpolation=cv2.INTER_AREA)
input = img_to_array(img)
input = np.expand_dims(input, axis=0)
input = preprocess_input(input)
feature = model.predict(input).ravel()
features.append(feature)
count = count + 1
unscaled_features = | np.array(features) | numpy.array |
import os
import glob
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
import scipy.ndimage as nd
from astropy.table import Table
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
from astropy.modeling import models, fitting
from astropy.modeling.models import Gaussian2D
from scipy.signal import fftconvolve
import mpdaf.obj
from mpdaf.obj import airtovac, vactoair
gau = models.Gaussian1D(mean=0, stddev=1)
from grizli.utils_c import interp
from grizli import utils
utils.set_warnings()
band_lims = {'Y': (9600, 11360),
'J': (11440, 13560),
'H': (14580, 18150),
'K': (18880, 24160)}
plt.rcParams['figure.max_open_warning'] = 100
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['image.interpolation'] = 'Nearest'
def optimal_extract(file, show_sn=False, slx=slice(300, -300),
                    prof_model=models.Gaussian1D, prof_keys=['mean', 'stddev'],
                    fit_profile=False, rescale_errors=True, prof_sigma=3,
                    prof_offset=0, escl=2, zi=0, binf=64, flux_corr=None,
                    to_vacuum=False, limits_2d=[-0.18, 0.18], gwidth=(4, 2),
                    clip_edge=5, suffix=''):
"""
Optimal extraction from 2D fits file
"""
#print(file)
im = pyfits.open(file)
valid = np.isfinite(im[0].data)
if valid.sum() == 0:
print('Bad spectrum')
return False
band = im[0].header['FILTER']
sh = im[0].data.shape
if file.endswith('_sp.fits'):
is_mine = True
wht = im['WHT'].data
valid &= wht > 0
sig = 1/np.sqrt(wht)
sig[~valid] = 0
else:
sig = pyfits.open(file.replace('_eps','_sig'))[0].data
is_mine = False
yp, xp = np.indices(sh)
oky = valid.sum(axis=0)
vx = oky > 3
xarr = np.arange(sh[-1])
if is_mine:
h = im[0].header
lam_coeffs = np.array([h[f'LAMCOEF{i}'] for i in range(h['LAMORDER']+1)])
wave = np.polyval(lam_coeffs, xarr-h['CRPIX1'])
#print('xx', wave.min(), wave.max(), lam_coeffs)
wcs = pywcs.WCS(im[0].header)
wave = wcs.all_pix2world(xarr, xarr*0., 0)[0]*1.e10
#sh = outsci.shape
#xarr = np.arange(sh[1])
yp = np.ones(sh[1]) + sh[0]/2
lam_wcs = pywcs.WCS(im[0].header)
#wave, _y = lam_wcs.all_pix2world(xarr, yp, 0)
#wave *= 1.e10 # A
#pix_lim = np.interp(band_lims[band], wave, xarr, left=0, right=2048)
pix_lim = (np.array(band_lims[band])-lam_coeffs[-1])/lam_coeffs[-2] + h['CRPIX1']
else:
wcs = pywcs.WCS(im[0].header)
wave = wcs.all_pix2world(xarr, xarr*0., 0)[0]*1.e10
pix_lim = wcs.all_world2pix(np.array(band_lims[band])/1.e10, [0,0], 0)[0] #wave[iw], xarr[iw])
if flux_corr is not None:
try:
to_flam = flux_corr[0](flux_corr[1], wave)
except:
to_flam = flux_corr(wave)
vx &= np.isfinite(to_flam)
if band in ['K']:
vx &= to_flam/np.nanmin(to_flam) < 6
else:
vx &= to_flam/np.nanmin(to_flam) < 6
else:
to_flam = 1.0
vx = np.where(vx)[0]
xvalid = vx.min(), vx.max()
valid &= (xp > xvalid[0]) & (xp < xvalid[1])
if slx is None:
slx = slice(*xvalid)
if is_mine:
targname = os.path.basename(file.split('_sp.fits')[0]) + suffix
else:
targname = os.path.basename(file.split('_eps.fits')[0]) + suffix
if os.path.exists(file.replace('_eps','_itime')):
itime = pyfits.open(file.replace('_eps','_itime'))
exptime = np.nanpercentile(itime[0].data[itime[0].data > 0], [50,90])
else:
if is_mine:
exptime = im[0].header['EXPTIME'], im[0].header['EXPTIME']
else:
exptime = [0,0]
y0 = im[0].header['CRPIX2']-im[0].header['CRVAL2']
if gwidth is not None:
ivar = 1/sig**2
ivar[~np.isfinite(ivar) | ~valid] = 0
gau = Gaussian2D(x_mean=0, x_stddev=gwidth[0], y_mean=0, y_stddev=gwidth[1])
xgarr = np.arange(-4*gwidth[0], 4.1*gwidth[0], 1)
ygarr = np.arange(-4*gwidth[1], 4.1*gwidth[1], 1)
xp, yp = np.meshgrid(xgarr, ygarr)
gm = gau(xp, yp)
sci = im[0].data*1
sci[~valid] = 0
num = fftconvolve(sci*ivar, gm, mode='same')
den = fftconvolve(ivar, gm**2, mode='same')
smoothed = num/den*valid
if show_sn:
smoothed *= np.sqrt(den)
yarr = np.arange(smoothed.shape[0])
ysl = np.abs(yarr-y0) < 10
perc = np.nanpercentile(smoothed[ysl,:][valid[ysl,:]], [16,50,84])
limits_2d = perc[1] - 3*np.diff(perc)[0], perc[1] + 3*np.diff(perc)[1]
if show_sn:
lmax = np.clip(limits_2d[1], 5, 40)
#print('xxx', lmax)
limits_2d = [-lmax, lmax]
else:
smoothed = im[0].data
smoothed[~valid] = 0
figs = []
fig, axes = plt.subplots(1,2, figsize=(12,3), gridspec_kw={'width_ratios':[3,1]}, sharey=True)
ax = axes[0]
figs.append(fig)
ax.imshow(smoothed, origin='lower', vmin=limits_2d[0], vmax=limits_2d[1], cmap='gray')
ax.set_aspect('auto')
if slx.stop < 0:
ax.vlines([slx.start, sh[1]+slx.stop], 0, sh[0]-1, color='w', linewidth=3, alpha=0.35)
ax.vlines([slx.start, sh[1]+slx.stop], 0, sh[0]-1, color='r', linewidth=1, alpha=0.35)
else:
ax.vlines([slx.start, slx.stop], 0, sh[0]-1, color='w', linewidth=3, alpha=0.35)
ax.vlines([slx.start, slx.stop], 0, sh[0]-1, color='r', linewidth=1, alpha=0.35)
ax.hlines(y0+np.array([-10,10]), 0, sh[1]-1,
color='w', linewidth=3, alpha=0.35)
ax.hlines(y0+np.array([-10,10]), 0, sh[1]-1,
color='r', linewidth=1, alpha=0.35)
ivar = 1/sig**2
sci = im[0].data*1
imask = (sig == 0) | ~np.isfinite(ivar) | ~np.isfinite(sci) | (~valid)
ivar[imask] = 0
sci[imask] = 0
iw = np.where(np.isfinite(wave))[0]
#print('x limits: ', sh, pix_lim)
ax.set_xlim(*pix_lim)
ax.xaxis.set_major_locator(MultipleLocator(200))
#ax.set_xticklabels([])
xt = ax.get_xticks()
ax.set_xticks(xt[2:-2])
ax.set_xticklabels(np.cast[int](xt[2:-2]))
yt = ax.get_yticks()
for j in [-3, -2]:
ax.text(0.01*(pix_lim[1]-pix_lim[0])+pix_lim[0], yt[j], f'{int(yt[j])}', ha='left', va='center',
fontsize=7, bbox=dict(edgecolor='None', facecolor='w', alpha=0.9))
ax.set_yticklabels([])
new_sci, new_ivar = sci, ivar
ax.text(0.98, 0.98, targname, ha='right', va='top',
transform=ax.transAxes, fontsize=8, bbox=dict(edgecolor='None', facecolor='w', alpha=0.9))
#fig, ax = plt.subplots(1,1,figsize=(5,5))
ax = axes[1]
#figs.append(fig)
prof = (new_sci*new_ivar)[:,slx].sum(axis=1)/(new_ivar[:,slx].sum(axis=1))
yarr = np.arange(len(prof))*1.
ax.plot(prof, yarr)
y0 = im[0].header['CRPIX2']-im[0].header['CRVAL2']+prof_offset
keys = {prof_keys[0]:y0, prof_keys[1]:prof_sigma}
prof_mask = np.isfinite(prof) & (np.abs(yarr-y0) < 10)
#print('Prof: ', yarr.shape, prof_mask.sum())
gau = prof_model(amplitude=prof[prof_mask].max(), **keys)
gau.bounds['mean'] = (y0-8,y0+8)
gau.bounds['stddev'] = (1, 4)
fit_status = True
if fit_profile:
fitter = fitting.LevMarLSQFitter()
try:
gau = fitter(gau, yarr[prof_mask & (prof > 0)], prof[prof_mask & (prof > 0)])
except:
fit_status = False
if fit_status:
prof_offset = gau.parameters[gau.param_names.index(prof_keys[0])] + im[0].header['CRVAL2']
ax.plot(gau(yarr), yarr)
ymax = 1.5*gau.amplitude.value
ax.set_xlim(-ymax, ymax)
ax.set_xticklabels([])
ax.text(gau.amplitude.value, 0.05*sh[0], f'{gau.amplitude.value:.4f}',
ha='center', va='center', bbox=dict(edgecolor='None', facecolor='w', alpha=0.9))
#ax.plot(yarr[prof_mask], gau(yarr)[prof_mask])
ax.hlines(y0+np.array([-10,10]),*plt.xlim(),
color='r', linewidth=1, alpha=0.35)
ax.grid()
fig.tight_layout(pad=0.5)
#fig.savefig(file.replace('_eps.fits','_extract.png'))
if not fit_status:
return {'fig_extract':fig}
yfull = np.linspace(yarr[0], yarr[-1], 1024)
gnorm = np.trapz(gau(yfull), yfull)
gprof = gau(yarr)/gnorm
num = (new_sci*new_ivar/escl**2*gprof[:,None]).sum(axis=0)
den = (gprof[:,None]**2*new_ivar/escl**2).sum(axis=0)
opt_flux = num/den
opt_err = 1/np.sqrt(den)
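# The sums above are inverse-variance-weighted (Horne-style) optimal
# extraction: opt_flux = sum_y(P_y * S_y * w_y) / sum_y(P_y**2 * w_y), with P
# the normalized spatial profile and w = ivar/escl**2, and the quoted
# uncertainty is 1/sqrt(sum_y(P_y**2 * w_y)).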
if rescale_errors:
ok = np.isfinite(opt_flux + opt_err) & (den > 0)
sn = (opt_flux/opt_err)[ok]
if np.median(sn) < 10:
df = np.diff(opt_flux[ok])
de = np.sqrt(opt_err[ok][1:]**2+opt_err[ok][:-1]**2)
scl = utils.nmad(df/de)
print(f'Rescale uncertainties: {scl:.3f}')
opt_err *= scl
else:
print(f'Rescale uncertainties (med SN={np.median(sn):.2f})')
#fig, ax = plt.subplots(1,1, figsize=(9, 3))
fig, axes = plt.subplots(1,2, figsize=(12,3), gridspec_kw={'width_ratios':[3,1]}, sharey=True)
ax = axes[0]
axes[1].axis('off')
figs.append(fig)
#ax.plot(wave/(1+zi), sp[0].data)
#ax.plot(wave/(1+zi), sp[1].data)
opt_ivar = 1/opt_err**2
ax.set_ylim(-0.05, 0.1)
#ax.set_xlim(0.98e4/(1+zi), 1.04e4/(1+zi))
# Lines
xline = opt_ivar < 0.7*np.median(opt_ivar)
opt_ivar[xline] *= 0. #0.05
opt_flux *= to_flam
opt_err *= to_flam
opt_ivar /= to_flam**2
ok_idx = np.where(np.isfinite(opt_ivar + opt_flux + to_flam))[0]
if len(ok_idx) > 2*clip_edge:
# print('Clip edge', len(ok_idx), len(opt_ivar))
ok_idx = ok_idx[clip_edge:-clip_edge]
opt_mask = np.ones(len(opt_ivar), dtype=bool)
opt_mask[ok_idx] = False
opt_ivar[opt_mask] = 0
opt_err[opt_mask] = 1e8
opt_flux[opt_mask] = 0
ax.plot(wave/(1+zi), opt_flux, alpha=0.4, color='0.5')
ax.plot(wave/(1+zi), opt_err, alpha=0.5, color='pink')
bkern = np.ones(binf)
bnum = nd.convolve1d(opt_flux*opt_ivar, bkern)[binf//2::binf]
bwnum = nd.convolve1d(wave*opt_ivar, bkern)[binf//2::binf]
bden = nd.convolve1d(opt_ivar, bkern)[binf//2::binf]
bflux = bnum/bden
berr = 1/np.sqrt(bden)
bwave = bwnum/bden
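# i.e. each binned pixel is the inverse-variance-weighted mean of binf
# neighbouring pixels, with the bin wavelength weighted the same way and the
# bin uncertainty following from the summed weights.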
ymax = np.percentile(bflux[np.isfinite(bflux)], 90)*5
ax.set_ylim(-0.5*ymax, ymax)
#ax.set_ylim(-0.05, 0.11)
ax.set_xlim(*(bwave[np.isfinite(bwave)][np.array([0,-1])]/(1+zi)))
xl = ax.get_xlim()
if (zi > 0):
ax.set_xlim(6500, 6800)
ax.vlines([3727., 4102.9, 4341.7, 4862., 4960., 5008., 6302, 6563., 6548, 6584,
6679., 6717, 6731, 7137.77, 7321.94, 7332.17],
ax.get_ylim()[0], 0, color='r', linestyle=':')
ax.set_xlabel(f'rest wave, z={zi:.4f}')
ax.set_xlim(*xl)
ax.text(0.98, 0.98, targname, ha='right', va='top',
transform=ax.transAxes, fontsize=8, bbox={'edgecolor':'None', 'facecolor':'w'})
if (zi > 6):
ax.vlines([1216.], *ax.get_ylim(), color='r', linestyle=':')
ax.errorbar(bwave/(1+zi), bflux, berr, color='k', alpha=0.4, linestyle='None', marker='.')
ax.plot(wave/(1+zi), wave*0., color='k', linestyle=':')
ok = np.isfinite(bflux+bwave)
utils.fill_between_steps(bwave[ok]/(1+zi), bflux[ok], bflux[ok]*0.,
ax=ax, color='orange', alpha=0.5, zorder=-1)
ax.set_xlim(*band_lims[band])
yt = ax.get_yticks()
for j in [0, yt[-3]]:
if j == 0:
labl = '0'
else:
if j < 0.1:
labl = f'{j:.2f}'
elif j < 1:
labl = f'{j:.1f}'
else:
labl = f'{int(j)}'
ax.text(0.01*(band_lims[band][1]-band_lims[band][0])+band_lims[band][0],
j, labl, ha='left', va='center',
fontsize=7, bbox=dict(edgecolor='None', facecolor='w', alpha=0.9))
ax.set_yticklabels([])
fig.tight_layout(pad=0.5)
#fig.savefig(file.replace('_eps.fits','_spec.png'))
if to_vacuum:
try:
wave = airtovac(wave)
bwave = airtovac(bwave)
except:
pass
spec = {'wave':wave, 'opt_flux':opt_flux, 'opt_err':opt_err,
'wave_bin':bwave, 'bin_flux':bflux, 'bin_err':berr}
spec['yarr'] = yarr
spec['prof_model'] = gau
spec['gprof'] = gau(yarr)
spec['prof'] = prof
spec['prof_offset'] = prof_offset
spec['fig_extract'] = figs[0]
spec['fig_1d'] = figs[1]
spec['to_flam'] = to_flam
spec['targname'] = targname
spec['im'] = im
spec['file'] = file
spec['filter'] = band
spec['xarr'] = xarr
spec['shape'] = sh
tab = utils.GTable()
tab['wave'] = wave.astype(np.float32)
tab['flux'] = opt_flux.astype(np.float32)
tab['err'] = opt_err.astype(np.float32)
tab.meta['ny'], tab.meta['nx'] = sh
tab['ny'] = oky
tab.meta['slx0'] = slx.start, '2D slice start'
tab.meta['slx1'] = slx.stop, '2D slice stop'
tab['to_flam'] = np.cast[np.float32](to_flam)
tab.meta['itime50'] = exptime[0], 'Median exposure time in 2D'
tab.meta['itime90'] = exptime[1], '90th percentile 2D exposure time'
tab.meta['wmin'] = tab['wave'][opt_ivar > 0].min(), 'Min valid wavelength'
tab.meta['wmax'] = tab['wave'][opt_ivar > 0].max(), 'Max valid wavelength'
snperc = np.nanpercentile((tab['flux']/tab['err'])[opt_ivar > 0], [16, 50, 84, 99])
tab.meta['sn16'] = snperc[0], 'SN 16th percentile'
tab.meta['sn50'] = snperc[1], 'SN median'
tab.meta['sn84'] = snperc[2], 'SN 84th percentile'
tab.meta['sn99'] = snperc[3], 'SN 99th percentile'
tab.meta['slitnum'] = im[0].header['SLITNUM'], 'Mask slit number'
tab.meta['slitidx'] = im[0].header['SLITIDX'], 'Mask slit index'
tab.meta['prof_amp'] = spec['prof_model'].amplitude.value, 'Profile model amplitude'
tab.meta['prof_sig'] = spec['prof_model'].stddev.value, 'Profile model sigma'
tab.meta['prof_mu'] = spec['prof_model'].mean.value, 'Profile model mean'
ima = np.nanargmax(prof)
tab.meta['prof_yma'] = yarr[ima], 'Location of profile max'
tab.meta['prof_ma'] = prof[ima], 'Profile max'
imi = np.nanargmin(prof)
tab.meta['prof_ymi'] = yarr[imi], 'Location of profile min'
tab.meta['prof_mi'] = prof[imi], 'Profile min'
for k in ['prof_offset','file','filter','targname']:
tab.meta[k] = spec[k]
stats = {}
cols = ['SKYPA3','AIRMASS','GUIDFWHM']
tr = {'SKYPA3':'pa','AIRMASS':'airm','GUIDFWHM':'fwhm'}
for k in cols:
stats[k] = []
for ki in spec['im'][0].header:
if '_img' not in ki:
continue
ks = ki.split('_img')[0]
if ks in cols:
stats[ks].append(spec['im'][0].header[ki])
for k in cols:
if len(stats[k]) == 0:
stats[k].append(0)
for k in stats:
#print(k, len(stats[k]), np.median(stats[k]))
tab.meta['{0}_min'.format(tr[k])] = np.nanmin(stats[k]), f'Min {k}'
tab.meta['{0}'.format(tr[k])] = np.nanmedian(stats[k]), f'Median {k}'
tab.meta['{0}_max'.format(tr[k])] = np.nanmax(stats[k]), f'Max {k}'
# full_path = os.path.join(os.getcwd(), file)
# full_path = file
tab.meta['file'] = os.path.basename(file), 'Extraction filename'
tab.meta['path'] = os.path.dirname(file), 'File path'
tab.meta['datemask'] = im[0].header['DATEMASK'], 'Unique mask identifier'
spec['opt_spec'] = tab
return spec
##################
## Find peak
def find_max(file, gwidth=(5,2), pad=10, erode=10, suffix=''):
"""
Find peak S/N in 2D spectrum file
"""
import scipy.ndimage as nd
im = pyfits.open(file)
valid = np.isfinite(im[0].data)
if erode:
valid = nd.binary_erosion(valid, iterations=erode)
if valid.sum() == 0:
return (-1, (0,0), 0)
if file.endswith('_sp.fits'):
targname = os.path.basename(file.split('_sp.fits')[0]) + suffix
is_mine = True
wht = im['WHT'].data
valid &= wht > 0
sig = 1/np.sqrt(wht)
import numpy as np
import nanocut.common as nc
from nanocut.output import error, printstatus
__all__ = [ "Periodicity", ]
def gcd(numbers):
"""Calculates greatest common divisor of a list of numbers."""
aa = numbers[0]
for bb in numbers[1:]:
while bb:
aa, bb = bb, aa % bb
return aa
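# A minimal usage sketch for gcd() (the helper name below is illustrative and
# not part of the original module): the Euclidean reduction is folded over the
# whole list, so e.g. gcd([12, 18, 24]) returns 6 and gcd([7, 13]) returns 1.
def _gcd_example():
    return gcd([12, 18, 24]), gcd([7, 13])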
def plane_axis_from_miller(miller):
"""Returns two vectors in a plane with given miller index.
Args:
miller: Miller indices of the plane (array of 3 integers)
Returns:
Two 3D vectors in relative coordinates, both being vectors
in the plane. It returns the shortest possible vectors.
"""
# Separate zero and nonzero components of Miller vector
nonzero = np.flatnonzero(np.not_equal(miller, 0))
import numpy as np
from tqdm import tqdm
import utils.helper as hlp
def slidewindow(ts, horizon=.2, stride=0.2):
xf = []
yf = []
for i in range(0, ts.shape[0], int(stride * ts.shape[0])):
horizon1 = int(horizon * ts.shape[0])
if (i + horizon1 + horizon1 <= ts.shape[0]):
xf.append(ts[i:i + horizon1,0])
yf.append(ts[i + horizon1:i + horizon1 + horizon1, 0])
xf = np.asarray(xf)
yf = np.asarray(yf)
return xf, yf
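# A small usage sketch (toy data and helper name are illustrative): with
# horizon=stride=0.2 on a 100-sample series, every input window and its target
# cover 20 consecutive samples of the first channel.
def _slidewindow_demo():
    ts = np.random.randn(100, 2)
    xf, yf = slidewindow(ts, horizon=0.2, stride=0.2)
    return xf.shape, yf.shape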
def cutPF(ts, perc=.5):
seq_len = ts.shape[0]
new_ts = ts.copy()
t=int(perc*seq_len)
return new_ts[:t, ...], new_ts[t:, ...]
def cutout(ts, perc=.1):
seq_len = ts.shape[0]
new_ts = ts.copy()
win_len = int(perc * seq_len)
start = np.random.randint(0, seq_len-win_len-1)
end = start + win_len
start = max(0, start)
end = min(end, seq_len)
# print("[INFO] start={}, end={}".format(start, end))
new_ts[start:end, ...] = 0
# return new_ts, ts[start:end, ...]
return new_ts
def cut_piece2C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len/(2*2)
if perc<1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len-win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1-start2)<(win_class):
label=0
else:
label=1
return ts[start1:end1, ...], ts[start2:end2, ...], label
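# Sketch of the labeling rule above (illustrative): two windows are cut at
# random starts; if the starts lie within seq_len/4 of each other the pair is
# labeled 0 ("near"), otherwise 1 ("far"). The *C variants below extend the
# same idea to more distance classes.
def _cut_piece2C_demo():
    ts = np.random.randn(200, 1)
    piece_a, piece_b, label = cut_piece2C(ts, perc=0.1)
    return piece_a.shape, piece_b.shape, label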
def cut_piece3C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len/(2*3)
if perc<1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len-win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1-start2)<(win_class):
label=0
elif abs(start1-start2)<(2*win_class):
label=1
else:
label=2
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece4C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 4)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
else:
label = 3
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece5C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 5)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
else:
label = 4
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece6C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 6)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
elif abs(start1 - start2) < (5 * win_class):
label = 4
else:
label = 5
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece7C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 7)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
elif abs(start1 - start2) < (5 * win_class):
label = 4
elif abs(start1 - start2) < (6 * win_class):
label = 5
else:
label = 6
return ts[start1:end1, ...], ts[start2:end2, ...], label
def cut_piece8C(ts, perc=.1):
seq_len = ts.shape[0]
win_class = seq_len / (2 * 8)
if perc < 1:
win_len = int(perc * seq_len)
else:
win_len = perc
start1 = np.random.randint(0, seq_len - win_len)
end1 = start1 + win_len
start2 = np.random.randint(0, seq_len - win_len)
end2 = start2 + win_len
if abs(start1 - start2) < (win_class):
label = 0
elif abs(start1 - start2) < (2 * win_class):
label = 1
elif abs(start1 - start2) < (3 * win_class):
label = 2
elif abs(start1 - start2) < (4 * win_class):
label = 3
elif abs(start1 - start2) < (5 * win_class):
label = 4
elif abs(start1 - start2) < (6 * win_class):
label = 5
elif abs(start1 - start2) < (7 * win_class):
label = 6
else:
label = 7
return ts[start1:end1, ...], ts[start2:end2, ...], label
def jitter(x, sigma=0.03):
# https://arxiv.org/pdf/1706.00527.pdf
return x + np.random.normal(loc=0., scale=sigma, size=x.shape)
def scaling(x, sigma=0.1):
# https://arxiv.org/pdf/1706.00527.pdf
factor = np.random.normal(loc=1., scale=sigma, size=(x.shape[0],x.shape[2]))
return np.multiply(x, factor[:,np.newaxis,:])
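# Minimal sketch of the two augmentations above (array shapes are assumptions,
# not from the original module): jitter() perturbs every sample independently,
# while scaling() draws one factor per (sample, channel) pair and therefore
# expects a 3-D batch shaped (batch, time, channels).
def _augmentation_demo():
    x = np.ones((4, 50, 3))
    x_jit = jitter(x, sigma=0.03)   # same shape, small additive noise
    x_scl = scaling(x, sigma=0.1)   # same shape, per-channel rescaling
    return x_jit.shape, x_scl.shape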
import math as mt
import numpy as np
import byxtal.find_csl_dsc as fcd
import byxtal.integer_manipulations as iman
import byxtal.bp_basis as bpb
import byxtal.pick_fz_bpl as pfb
import numpy.linalg as nla
import ovito.data as ovd
from ovito.pipeline import StaticSource, Pipeline
import ovito.modifiers as ovm
from ovito.data import CutoffNeighborFinder
def find_int_solns(a_vec, b_vec):
"""
Given two basis vectors (a_vec and b_vec) in the primitive basis,
find the third basis vector (c_vec) such that the matrix
[a_vec, b_vec, c_vec] is a valid basis.
All the components of the vectors are integers and
the determinant of the matrix must be equal to **1**.
Parameters
-----------------
a_vec: numpy.array
The first basis vector. Must be an integer array.
b_vec: numpy.array
The second basis vector. Must be an integer array.
Returns
------------
l_p2_p1: numpy.array, (3X3, must be an integer array)
A 3x3 numpy array of integers that forms the new basis for the lattice.
"""
a1 = a_vec[0]
a2 = a_vec[1]
a3 = a_vec[2]
b1 = b_vec[0]
b2 = b_vec[1]
b3 = b_vec[2]
a = a2*b3 - a3*b2
b = -(a1*b3 - a3*b1)
c = a1*b2 - a2*b1
d = 1
a = int(a)
b = int(b)
c = int(c)
d = int(d)
p = mt.gcd(a, b)
if p == 0:
if c == 1:
y1 = 0
y2 = 0
y3 = 1
# l_p2_p1 = (np.hstack((a_vec, b_vec, np.array([[y1],[y2],[y3]]))))
l_p2_p1 = np.dstack((a_vec, b_vec, np.array([y1, y2, y3]))).squeeze()
det1 = nla.det(l_p2_p1)
if ((np.abs(det1)-1) > 1e-10):
raise Exception('Error with Diophantine solution')
else:
if det1 == -1:
l_p2_p1[:, 2] = -l_p2_p1[:, 2]
else:
raise Exception('Error with boundary-plane indices')
else:
a1 = int(a/p)
b1 = int(b/p)
# Let u0 and v0 any solution of a'u + b'v = c
int_soln1 = bpb.lbi_dioph_soln(a1, b1, c)
u0 = int(int_soln1[0])
v0 = int(int_soln1[1])
# z0, t0 any solution of cz + pt = d
int_soln2 = bpb.lbi_dioph_soln(c, p, d)
z0 = int(int_soln2[0])
t0 = int(int_soln2[1])
# x0, y0 any solution of a'x + b'y = t0
int_soln3 = bpb.lbi_dioph_soln(a1, b1, t0)
x0 = int(int_soln3[0])
y0 = int(int_soln3[1])
# The general solution of ax + by + cz = d is :
# x = x0 + b'k - u0m
# y = y0 - a'k - v0m
# z = z0 + pm with k and m any integer in \mathbb{Z}
tn1 = 10
ival = np.arange(-(tn1), tn1+1)
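# Worked example of the construction above (numbers are illustrative): for
# a_vec = (1, 2, 3) and b_vec = (0, 1, 4) the cross product gives
# (a, b, c) = (5, -4, 1), so any integer c_vec with 5*x - 4*y + 1*z = 1,
# e.g. (0, 0, 1), completes a basis [a_vec, b_vec, c_vec] with determinant 1.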
#file reading portion of 190621_accel_combined only
import os
import glob
from datetime import datetime, timedelta
import time
import csv
import numpy as np
import statistics
import json
import geopy.distance
import urllib.request
from scipy import interpolate
from scipy import fft
from scipy import signal
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
from accelinfo import getseiscoords, getfilepath
#%%
filename = max(glob.iglob(getfilepath()), key=os.path.getctime) #get path of most recent data file
#starttime_s = os.path.getmtime(filename)
#starttime_s = os.path.getctime(filename)
print('Reading metadata')
with open(filename, newline='\n') as f:
reader = csv.reader(f)
metadata = next(reader)
metadatapresent = True
if 'PDT' in metadata[0]: #if timezone is PDT
starttime_s = metadata[0].strip('metadata: PDT')
elif 'UTC' in metadata[0]: #if timezone is UTC
starttime_s = metadata[0].strip('metadata: UTC')
elif 'metadata' not in metadata[0]:
#convert filename to starttime
starttime = os.path.basename(filename)
starttime = datetime.strptime(starttime.replace('_accel.csv',''),'%Y%m%d_%H%M')
#starttime = filename[15:27]
#starttime = starttime_s.replace('_','')
#yeartime = starttime[0:3]
#convert datetime object to seconds
starttime_s = starttime.timestamp()
metadatapresent = False #set metadatapresent
else: #tries to handle messed up time from first files
starttime_s = metadata[0].strip('metadata: ')
starttime_s = starttime_s.replace('-',',')
starttime_s = starttime_s.replace(' ',',')
starttime_s = starttime_s.replace(':',',')
starttime_s = list(starttime_s)
if starttime_s[5] == '0':
    starttime_s[5] = ''
if starttime_s[8] == '0':
    starttime_s[8] = ''
starttime_s[19:26] = ''
starttime_s = ''.join(starttime_s)
# split on the commas inserted above and convert each date/time field to an int
starttime_s = [int(part) for part in starttime_s.split(',')]
starttime_s = (datetime(*starttime_s) - datetime(1970, 1, 1)).total_seconds()
if metadatapresent == True:
accelunits = metadata[1]
timeunits = metadata[2]
sensorname = metadata[3]
comstandard = metadata[4]
accelprecision = 'none' #set precision to 'none' if none is specified
if len(metadata) > 5:
accelprecision = metadata[5] #precision = number of digits after the decimal
else:
accelunits = 'g'
timeunits = 'ms'
sensorname = 'unknown'
comstandard = 'serial'
accelprecision = 'none'
#%%
print('Reading file')
with open(filename) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
accelx = []
accely = []
accelz = []
timems = []
fullrow = []
skippedtotal = 0
skippedrowlen = 0
skippedsplit = 0
skippedaxis = 0
skippedt = 0
skippedrows = []
#lengthaccellow = 13
#lengthaccelhigh = 15
if accelprecision == 'none': #if no precision set
rowlenlow = 43
rowlenhigh = 56
lengthaccellow = 13
lengthaccelhigh = 15
else: #if precision set, set length limits based on precision
lengthaccellow = accelprecision + 2
lengthaccelhigh = accelprecision + 4
rowlenlow = (lengthaccellow * 3) + 4
rowlenhigh = (lengthaccelhigh * 3) + 9
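# Worked example of the limits above (illustrative numbers): with
# accelprecision = 6 each axis value such as "-0.123456" is 8-10 characters
# long, so lengthaccellow/high become 8 and 10 and a complete "x,y,z,t" row is
# expected to span 28 (3*8 + 4) to 39 (3*10 + 9) characters.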
for row in readCSV: #step through rows in file
fullrow = row[0]
if len(row[0]) < rowlenlow: #if row is too short, skip
#print(len(fullrow))
skippedtotal = skippedtotal + 1
skippedrowlen = skippedrowlen + 1
#print(fullrow)
continue
if len(row[0]) > rowlenhigh: #if row is too long, skip
skippedtotal = skippedtotal + 1
skippedrowlen = skippedrowlen + 1
#print(fullrow)
#print(len(fullrow))
continue
fullrow = row[0].split(',') #split row into sections at commas
#print(fullrow)
if len(fullrow) != 4: #if wrong number of commas, skip
skippedtotal = skippedtotal + 1
skippedsplit = skippedsplit + 1
#print(fullrow)
continue
#print(fullrow) #print whole row
x = fullrow[0]
x = str(float(x))
if (len(x) < lengthaccellow) or (len(x) > lengthaccelhigh):
skippedtotal = skippedtotal + 1
skippedaxis = skippedaxis + 1
#print(fullrow)
continue
y = fullrow[1]
y = str(float(y))
if (len(y) < lengthaccellow) or (len(y) > lengthaccelhigh):
skippedtotal = skippedtotal + 1
skippedaxis = skippedaxis + 1
#print(fullrow)
continue
z = fullrow[2]
z = str(float(z))
if (len(z) < lengthaccellow) or (len(z) > lengthaccelhigh):
skippedtotal = skippedtotal + 1
skippedaxis = skippedaxis + 1
#print(fullrow)
continue
#print('here')
t = fullrow[3]
t = t.strip()
if (len(t) > 9) or (len(t) < 1):
skippedtotal = skippedtotal + 1
skippedt = skippedt + 1
#print(fullrow)
continue
accelx.append(x)
accely.append(y)
accelz.append(z)
timems.append(t)
#convert data arrays into stuff matplotlib will accept
print('Converting data arrays')
accelx = np.array(accelx)
accelx = accelx.astype(np.float)
accely = np.array(accely)
accely = accely.astype(np.float)
accelz = np.array(accelz)
accelz = accelz.astype(np.float)
timems = np.array(timems)
timems = timems.astype(np.float)
#convert timems to time_s
print('Converting ms to S')
starttime_s = np.array(starttime_s)
starttime_s = starttime_s.astype(np.float)
time_s = [] #initialize arry
time_s = [((x/1000)+starttime_s) for x in timems] #time_s = timems converted to s and added to the start time
endtime_s = time_s[-1] #get end time by reading last value in time_s
#calculate statistics
print('Calculating statistics')
timediff = np.diff(time_s)
# coding: utf-8
import os, pickle, csv, json
import subprocess
from typing import NamedTuple, List, TextIO, Tuple, Dict, Optional, Union, Iterable, Hashable
import numpy as np
import pandas as pd
from scipy import stats
from itertools import product, groupby, takewhile
from collections import namedtuple, Counter
import multiprocessing
import logging
import string
import matplotlib
matplotlib.use("Agg")
# pids with missing data (i.e., pdbs missing for either sid, eid, and/or gid)
pids_missing_data = {'2000524',
'2001234',
'2001249',
'2001255',
'2001287',
'2001291',
'2001306',
'2001308',
'2001311',
'2002239',
'2002243',
'2002247',
'2002255',
'2002713',
'2002963',
'2002990',
'2002992',
'2003008',
'2003011',
'2003015',
'997529',
'996023'}
unfetched_pids = {'2000659',
'2001302',
'2002102',
'2002465',
'2002809',
'2002833',
'2002850',
'2003001',
'2003047',
'2003059',
'2003078',
'2003126',
'2003183',
'996313',
'996492',
'996508',
'997542',
'997940',
'998465',
'998529',
'998574'}
# fetched, but corrupt
bad_pids = {'1998935',
'2000659',
'2001302',
'2002102',
'2002465',
'2002809',
'2002833',
'2002850',
'2003078',
'2003126',
'2003183',
'2003763',
'2003832',
'997766'}
# stopped early due to crashes or errors
stopped_pids = {'2003699',
'2003183',
'2002494',
'2002247',
'2002912',
'2003801'}
# restarted version of stopped puzzle
restarted_pids = {'2003704',
'2002499',
'2002255',
'2002914',
'2003806'}
pids_missing_energies = {'996547'}
pids_missing_pdl_actions = {'998071',
'1998729',
'998219'}
skip_pids = pids_missing_energies.union(pids_missing_pdl_actions).union(bad_pids)
class EnergyComponent(NamedTuple):
name: str
weight: float
energy: float
class PDB_Info(NamedTuple):
sid: str
pid: str
uid: str
gid: str
sharing_gid: str
scoretype: str
pdl: Dict
energy: float
energy_components: List[EnergyComponent]
timestamp: int
parent_sid: Optional[str]
tmscore: float
deviations: np.ndarray
class SnapshotDelta(NamedTuple):
sid: str
parent_sid: Optional[str]
timestamp: int
action_diff: Counter
macro_diff: Counter
action_count: int
energy_diff: float
class SolvingLineVariant(NamedTuple):
action_count: int
time: int
indices: List[int]
class SolvingLine(NamedTuple):
action_count: int
time: int
pdb_infos: List[PDB_Info]
variants: List[SolvingLineVariant]
@property
def energies(self):
return [x.energy for x in self.pdb_infos]
class EvolvingLine(NamedTuple):
source: Dict
pdb_infos: List[PDB_Info]
@property
def energies(self):
return [x.energy for x in self.pdb_infos]
class PuzzleMeta(NamedTuple):
pid: str
best_tmscores: Dict
pfront: np.ndarray
upload_baseline: float
energy_baseline: float
structure: Dict
class PatternInstance(NamedTuple):
cid: int
uid: str
pid: str
start_idx: int
end_idx: int
class PatternInstanceExt(NamedTuple):
cid: int
uid: str
pid: str
start_idx: int
end_idx: int
start_pdb: PDB_Info
end_pdb: PDB_Info
pre_best: PDB_Info
post_best: PDB_Info
class SubPatternInstance(NamedTuple):
p: PatternInstance
label: str
start_idx: int
end_idx: int
class SubLookup(NamedTuple):
clusters: Dict[str, Dict[int, Dict[int, Dict[int, np.ndarray]]]] # (user to k to cid to sub_k to cluster labels)
mrfs: Dict[str, Dict[int, Dict[int, Dict[int, Dict[int, np.ndarray]]]]] # (user to k to cid to sub_k to mrf dictionary (cluster label to mrf))
models: Dict[str, Dict[int, Dict[int, Dict[int, Dict]]]] # (user to k to cid to sub_k to dict of ticc model parameters)
bics: Dict[str, Dict[int, Dict[int, Dict[int, float]]]] # (user to k to cid to sub_k to bic)
class SubSeriesLookup(NamedTuple):
patterns: Dict[Hashable, np.ndarray] # e.g., (uid, pid, start index) -> series for that pattern
series: np.ndarray
idx_lookup: Dict[Hashable, Tuple[int, int]]
class SubclusterSeries(NamedTuple):
labels: List[str]
series: np.ndarray
# type aliases
SubClusters = Dict[int, Dict[int, Dict[int, np.ndarray]]]
SubMRFs = Dict[int, Dict[int, Dict[int, Dict[int, np.ndarray]]]]
PatternLookup = Union[Dict[str, Iterable[PatternInstance]], Dict[int, Dict[int, Iterable[PatternInstance]]]]
@pd.api.extensions.register_series_accessor("foldit")
class FolditSeriesAccessor:
def __init__(self, pandas_obj: pd.Series):
self._validate(pandas_obj)
self._obj = pandas_obj
@staticmethod
def _validate(obj: pd.Series):
# verify the Series carries the 'lines' and 'evol_lines' entries
if ('lines' not in obj.index or 'evol_lines' not in obj.index) and (obj.name != "lines" and obj.name != "evol_lines"):
raise AttributeError("Must have 'lines' and 'evol_lines'.")
@property
def solo_pdbs(self):
return [p for l in self._obj.lines for p in l.pdb_infos] if self._obj.lines else []
@property
def evol_pdbs(self):
return [p for l in self._obj.evol_lines for p in l.pdb_infos] if self._obj.evol_lines else []
@property
def solo_energies(self):
return [p.energy for p in self._obj.foldit.solo_pdbs]
@property
def evol_energies(self):
return [p.energy for p in self._obj.foldit.evol_pdbs]
@pd.api.extensions.register_dataframe_accessor("foldit")
class FolditAccessor:
def __init__(self, pandas_obj: pd.Series):
self._validate(pandas_obj)
self._obj = pandas_obj
@staticmethod
def _validate(obj: pd.Series):
# verify the DataFrame has 'lines' and 'evol_lines' columns
if 'lines' not in obj.columns or 'evol_lines' not in obj.columns:
raise AttributeError("Must have 'lines' and 'evol_lines'.")
@property
def solo_pdbs(self):
return self._obj.apply(lambda r: r.foldit.solo_pdbs, axis=1)
@property
def evol_pdbs(self):
return self._obj.apply(lambda r: r.foldit.evol_pdbs, axis=1)
@property
def solo_energies(self):
return self._obj.apply(lambda r: r.foldit.solo_energies, axis=1)
@property
def evol_energies(self):
return self._obj.apply(lambda r: r.foldit.evol_energies, axis=1)
# @property
# def pdbs(self):
ROOT_NID = ('00000000-0000-0000-0000-000000000000', 0)
category_lookup = {
'overall': '992758',
'beginner': '992759',
'prediction': '992760',
'design': '992761',
'electron': '994237',
'contacts': '997946',
'symmetry': '992769',
'casp10': '992762',
'casp11': '997398',
'casp_roll': '993715',
'hand_folding': '994890',
'small_molecule_design': '2002074',
"pilot": "2004148",
'all': 'all', # dummy to allow select of all categorized puzzles
}
action_types = {
'optimize': {'ActionGlobalMinimize', 'ActionGlobalMinimizeBackbone', 'ActionGlobalMinimizeSidechains',
'ActionLocalMinimize', 'ActionRepack'},
'hybrid': {'ActionLocalMinimizePull', 'LoopHash', 'ActionBuild', 'ActionPullSidechain', 'ActionTweak',
'ActionRebuild'},
'manual': {'ActionSetPhiPsi', 'ActionJumpWidget', 'ActionRotamerCycle', 'ActionRotamerSelect'},
'guiding': {'ActionInsertCut', 'ActionLockToggle', 'ActionCopyToggle', 'ActionSecStructAssignHelix',
'ActionSecStructAssignLoop', 'ActionSecStructAssignSheet', 'ActionSecStructDSSP', 'ActionSecStructDrag',
'ActionBandAddAtomAtom', 'ActionBandAddDrag', 'ActionBandAddResRes', 'ActionBandDrag',
'ActionBandLength', 'ActionBandStrength'},
}
action_types['deliberate'] = action_types['hybrid'].union(action_types['manual']).union(action_types['guiding'])
def rmse(predictions, targets):
return np.sqrt(((predictions - targets) ** 2).mean())
def iden(x):
return x
def get_ranks(datafile):
puzzles = {}
with open("{}.csv".format(datafile)) as fp:
ranks_in = csv.DictReader(fp)
for row in ranks_in:
row['energy'] = float(row['best_score'])
row['best_score'] = max(float(row['best_score']) * -10 + 8000, 0)
pid = row['pid']
if pid not in puzzles:
puzzles[pid] = {
'groups': {},
'soloists': [],
'evolvers': [],
'categories': []
}
if row['gid'] == '0':
row['gid'] = 'NULL' # no sense in having both 0 and NULL for no group
gid = row['gid']
if gid != 'NULL':
gs = puzzles[pid]['groups']
if gid not in gs:
gs[gid] = {
'score': row['best_score'],
'type': row['type'],
'gid': gid,
'uid': row['uid'],
}
if gs[gid]['score'] < row['best_score']:
gs[gid]['score'] = row['best_score']
gs[gid]['type'] = row['type']
gs[gid]['uid'] = row['uid']
if row['type'] == '1':
puzzles[pid]['soloists'].append(row)
if row['type'] == '2':
puzzles[pid]['evolvers'].append(row)
for pid in puzzles:
p = puzzles[pid]
p['groups'] = list(p['groups'].values())
# reverse sorts to put them in descending order (top ranked should be first)
p['groups'].sort(key=lambda x: x['score'], reverse=True)
for i, g in enumerate(p['groups']):
g['rank'] = i
g['norm_rank'] = i / len(p['groups'])
p['soloists'].sort(key=lambda x: x['best_score'], reverse=True)
for i, s in enumerate(p['soloists']):
s['rank'] = i
s['norm_rank'] = i / len(p['soloists'])
p['evolvers'].sort(key=lambda x: x['best_score'], reverse=True)
for i, e in enumerate(p['evolvers']):
e['rank'] = i
e['norm_rank'] = i / len(p['evolvers'])
return puzzles
def get_ranks_labeled():
puzzles = get_ranks("data/rprp_puzzle_ranks_latest")
with open("data/puzzle_categories_latest.csv") as fp:
cat_in = csv.DictReader(fp)
for r in cat_in:
pid = r['nid']
if pid in puzzles:
puzzles[pid]['categories'] = r['categories'].split(',')
puzzles[pid]['categories'].append('all')
with open("data/puzzle_labels_latest.json") as fp:
lab_in = json.load(fp)
for r in lab_in:
pid = r['pid']
if pid in puzzles:
assert r['title'] is not None
puzzles[pid]['title'] = r['title']
if r['desc'] is not None:
puzzles[pid]['desc'] = r['desc']
return puzzles
def add_pdbs_to_ranks(puzzles):
print("loading pdbs")
with open("data/top_pdbs.pickle", 'rb') as pdb_fp:
pdbs = pickle.load(pdb_fp)
pdbs = [p for p in pdbs if 'PID' in p and len(p['PDL']) > 0]
print("grouping pdbs")
pdbs_by_pid = {pid: list(g) for pid, g in groupby(pdbs, lambda p: p['PID'])}
for pid in pids_missing_data.union(unfetched_pids):
pid in puzzles and puzzles.pop(pid)
for pid in puzzles.copy():
pid not in pdbs_by_pid and puzzles.pop(pid)
for pid, ps in pdbs_by_pid.items():
if pid in puzzles:
puzzles[pid]['pdbs'] = ps
def sig_test(a, b, fstr="{} (n={}) {} (n={})", normal=False, thresholds=frozenset()):
if normal:
t, p = stats.ttest_ind(a, b, equal_var=False)
else:
U2, p = stats.mannwhitneyu(np.array(a), np.array(b), use_continuity=True, alternative='two-sided')
U = min(U2, len(a) * len(b) - U2)
N = len(a) * len(b)
f = len(list(filter(lambda xy: xy[0] > xy[1], product(a, b)))) / N
u = len(list(filter(lambda xy: xy[0] < xy[1], product(a, b)))) / N
if ('p' not in thresholds or p < thresholds['p']) and ('r' not in thresholds or abs(f - u) > thresholds['r']):
print(fstr.format("mean={:.6f}, median={:.6f}, std={:.6f}".format(np.mean(a), np.median(a), np.std(a)), len(a),
"mean={:.6f}, median={:.6f}, std={:.6f}".format(np.mean(b), np.median(b), np.std(b)), len(b)))
if normal:
print("test statistic t: {:.6f}".format(t))
else:
print("<NAME> U: {:.6f}".format(U))
print("significance (two-tailed): {:.6f}".format(p))
print("rank-biserial correlation: {:.3f}".format(f - u))
return p, f - u
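# A minimal sketch of calling sig_test() (toy samples, helper name is
# illustrative): the default non-parametric path runs the Mann-Whitney test and
# returns the two-sided p-value and the rank-biserial correlation.
def _sig_test_demo():
    a = list(np.random.normal(0.0, 1.0, 50))
    b = list(np.random.normal(0.5, 1.0, 50))
    return sig_test(a, b)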
def get_atoms(pdb):
raw = [[float(x) for x in s.strip(' "[]').split(" ")] for s in pdb['ca'].split(",")]
if all(k == 0 for k in raw[-1]):
return np.array(raw[:-1])
# remove spurious atom at 0 0 0 that appears at the end of each of these
return np.array(raw)
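# Illustrative sketch of the 'ca' parsing above: a value such as
# '"[1.0 2.0 3.0]", "[4.0 5.0 6.0]", "[0 0 0]"' yields a (2, 3) array, because
# the spurious trailing all-zero atom is dropped.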
import os
from collections import OrderedDict
from wisdem import run_wisdem
import wisdem.postprocessing.compare_designs as compare_designs
import wisdem.postprocessing.wisdem_get as getter
import wisdem.commonse.utilities as util
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from generateTables import RWT_Tabular
# File management
thisdir = os.path.dirname(os.path.realpath(__file__))
ontology_dir = os.path.join(os.path.dirname(thisdir), "WT_Ontology")
fname_modeling_options = os.path.join(thisdir, "modeling_options.yaml")
fname_analysis_options = os.path.join(thisdir, "analysis_options.yaml")
folder_output = os.path.join(thisdir, "outputs")
def run_15mw(fname_wt_input):
float_flag = fname_wt_input.find('Volturn') >= 0
# Run WISDEM
prob, modeling_options, analysis_options = run_wisdem(fname_wt_input, fname_modeling_options, fname_analysis_options)
# Produce standard plots
compare_designs.run([prob], ['IEA Wind 15-MW'], modeling_options, analysis_options)
# Tabular output: Blade Shape
blade_shape = np.c_[prob.get_val('blade.outer_shape_bem.s'),
prob.get_val('blade.outer_shape_bem.ref_axis','m')[:,2],
prob.get_val('blade.outer_shape_bem.chord','m'),
prob.get_val('blade.outer_shape_bem.twist', 'deg'),
prob.get_val('blade.interp_airfoils.r_thick_interp')*100,
prob.get_val('blade.outer_shape_bem.pitch_axis')*100,
prob.get_val('blade.outer_shape_bem.ref_axis','m')[:,0],
prob.get_val('blade.outer_shape_bem.ref_axis','m')[:,1],
]
blade_shape_col = ['Blade Span','Rotor Coordinate [m]',
'Chord [m]', 'Twist [deg]',
'Relative Thickness [%]', 'Pitch Axis Chord Location [%]',
'Prebend [m]', 'Sweep [m]']
bladeDF = pd.DataFrame(data=blade_shape, columns=blade_shape_col)
# Tabular output: Blade Stiffness
blade_stiff = np.c_[prob.get_val('rotorse.r','m'),
prob.get_val('rotorse.A','m**2'),
prob.get_val('rotorse.EA','N'),
prob.get_val('rotorse.EIxx','N*m**2'),
prob.get_val('rotorse.EIyy','N*m**2'),
prob.get_val('rotorse.EIxy','N*m**2'),
prob.get_val('rotorse.GJ','N*m**2'),
prob.get_val('rotorse.rhoA','kg/m'),
prob.get_val('rotorse.rhoJ','kg*m'),
prob.get_val('rotorse.x_ec','mm'),
prob.get_val('rotorse.y_ec','mm'),
prob.get_val('rotorse.re.x_tc','mm'),
prob.get_val('rotorse.re.y_tc','mm'),
prob.get_val('rotorse.re.x_sc','mm'),
prob.get_val('rotorse.re.y_sc','mm'),
prob.get_val('rotorse.re.x_cg','mm'),
prob.get_val('rotorse.re.y_cg','mm'),
prob.get_val('rotorse.re.precomp.flap_iner','kg/m'),
prob.get_val('rotorse.re.precomp.edge_iner','kg/m')]
blade_stiff_col = ['Blade Span [m]',
'Cross-sectional area [m^2]',
'Axial stiffness [N]',
'Edgewise stiffness [Nm^2]',
'Flapwise stiffness [Nm^2]',
'Flap-edge coupled stiffness [Nm^2]',
'Torsional stiffness [Nm^2]',
'Mass density [kg/m]',
'Polar moment of inertia density [kg*m]',
'X-distance to elastic center [mm]',
'Y-distance to elastic center [mm]',
'X-distance to tension center [mm]',
'Y-distance to tension center [mm]',
'X-distance to shear center [mm]',
'Y-distance to shear center [mm]',
'X-distance to mass center [mm]',
'Y-distance to mass center [mm]',
'Section flap inertia [kg/m]',
'Section edge inertia [kg/m]',
]
bladeStiffDF = pd.DataFrame(data=blade_stiff, columns=blade_stiff_col)
# Blade internal laminate layer details
layerDF = []
l_s = prob.get_val("blade.internal_structure_2d_fem.s")
lthick = prob.get_val("blade.internal_structure_2d_fem.layer_thickness", 'm')
lrot = prob.get_val("blade.internal_structure_2d_fem.layer_rotation", 'deg')
lstart = prob.get_val("blade.internal_structure_2d_fem.layer_start_nd")
lend = prob.get_val("blade.internal_structure_2d_fem.layer_end_nd")
nlay = lthick.shape[0]
layer_cols = ['Span','Thickness [m]','Fiber angle [deg]','Layer Start','Layer End']
for k in range(nlay):
ilay = np.c_[l_s, lthick[k,:], lrot[k,:], lstart[k,:], lend[k,:]]
layerDF.append( pd.DataFrame(data=ilay, columns=layer_cols) )
# Tabular output: Rotor Performance
rotor_perf = np.c_[prob.get_val("rotorse.rp.powercurve.V",'m/s'),
prob.get_val("rotorse.rp.powercurve.pitch",'deg'),
prob.get_val("rotorse.rp.powercurve.P",'MW'),
prob.get_val("rotorse.rp.powercurve.Cp"),
prob.get_val("rotorse.rp.powercurve.Cp_aero"),
prob.get_val("rotorse.rp.powercurve.Omega",'rpm'),
prob.get_val("rotorse.rp.powercurve.Omega",'rad/s')*0.5*prob["configuration.rotor_diameter_user"],
prob.get_val("rotorse.rp.powercurve.T",'MN'),
prob.get_val("rotorse.rp.powercurve.Ct_aero"),
prob.get_val("rotorse.rp.powercurve.Q",'MN*m'),
prob.get_val("rotorse.rp.powercurve.Cq_aero"),
prob.get_val("rotorse.rp.powercurve.M",'MN*m'),
prob.get_val("rotorse.rp.powercurve.Cm_aero"),
]
rotor_perf_col = ['Wind [m/s]','Pitch [deg]',
'Power [MW]','Power Coefficient [-]','Aero Power Coefficient [-]',
'Rotor Speed [rpm]','Tip Speed [m/s]',
'Thrust [MN]','Thrust Coefficient [-]',
'Torque [MNm]','Torque Coefficient [-]',
'Blade Moment [MNm]','Blade Moment Coefficient [-]',
]
perfDF = pd.DataFrame(data=rotor_perf, columns=rotor_perf_col)
# Nacelle mass properties tabular
# Columns are ['Mass', 'CoM_x', 'CoM_y', 'CoM_z',
# 'MoI_cm_xx', 'MoI_cm_yy', 'MoI_cm_zz', 'MoI_cm_xy', 'MoI_cm_xz', 'MoI_cm_yz',
# 'MoI_TT_xx', 'MoI_TT_yy', 'MoI_TT_zz', 'MoI_TT_xy', 'MoI_TT_xz', 'MoI_TT_yz']
nacDF = prob.model.wt.drivese.nac._mass_table
hub_cm = float(prob["drivese.hub_system_cm"])
L_drive = float(prob["drivese.L_drive"])
tilt = float(prob.get_val('nacelle.uptilt', 'rad'))
shaft0 = prob["drivese.shaft_start"]
Cup = -1.0
hub_cm = R = shaft0 + (L_drive + hub_cm) * np.array([Cup * np.cos(tilt), 0.0, np.sin(tilt)])
hub_mass = prob['drivese.hub_system_mass']
hub_I = prob["drivese.hub_system_I"]
hub_I_TT = util.rotateI(hub_I, -Cup * tilt, axis="y")
hub_I_TT = util.unassembleI( util.assembleI(hub_I_TT) +
hub_mass * (np.dot(R, R) * np.eye(3) - np.outer(R, R)) )
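# The pattern above (and for the blades and RNA below) is the parallel-axis
# theorem: rotate the inertia tensor out of the tilted shaft frame, then add
# m * (R.R * I3 - R R^T) to refer it to the tower top, with R the offset from
# the tower top to the component's center of mass.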
blades_mass = prob['drivese.blades_mass']
blades_I = prob["drivese.blades_I"]
blades_I_TT = util.rotateI(blades_I, -Cup * tilt, axis="y")
blades_I_TT = util.unassembleI( util.assembleI(blades_I_TT) +
blades_mass * (np.dot(R, R) * np.eye(3) - np.outer(R, R)) )
rna_mass = prob['drivese.rna_mass']
rna_cm = R = prob['drivese.rna_cm']
rna_I_TT = prob['drivese.rna_I_TT']
rna_I = util.unassembleI( util.assembleI(rna_I_TT) +
rna_mass * (np.dot(R, R) * np.eye(3) - np.outer(R, R)) )
from tsfresh.examples.robot_execution_failures import download_robot_execution_failures, \
load_robot_execution_failures
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from glob import glob
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from tsfresh.transformers import RelevantFeatureAugmenter
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import ComprehensiveFCParameters
settings = ComprehensiveFCParameters()
from tsfresh import extract_features
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from tsfresh.feature_selection.relevance import calculate_relevance_table
from pca import PCAForPandas
from dtwnn import KnnDtw
from boruta import BorutaPy
import copy
import time
import sys
import csv
import matplotlib.colors as mcolors
# adjust for testing, but the full run requires 10 stratified sample folds
num_folds = 10
# tell pandas to consider infinity as a missing value (for filtering)
pd.options.mode.use_inf_as_na = True
# record our overall start time for time delta display in log messages
mark = time.time()
# return value to indicate that the test for a fold failed and should be ignored
ignore_this_fold = {
'rfc': -1,
'ada': -1,
'rfc_count': -1,
'ada_count': -1,
}
# read both the TEST and TRAIN files for a particular
# dataset into a single set, then partition the data
# and label into X and y DataFrames
def get_combined_raw_dataset(root_path: str):
name = root_path.split('/')[2]
raw_train = pd.read_csv(root_path + name + '_TRAIN.tsv', delimiter='\t', header=None)
raw_test = pd.read_csv(root_path + name + '_TEST.tsv', delimiter='\t', header=None)
combined = raw_train.append(raw_test)
v = combined.reset_index().drop(['index'], axis=1)
X = v.iloc[:,1:]
y = v.iloc[:,:1]
return (X, y)
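# Minimal usage sketch (the dataset name is illustrative and assumes a
# UCR-style layout ./data/GunPoint/GunPoint_TRAIN.tsv plus _TEST.tsv):
def _load_example():
    X, y = get_combined_raw_dataset('./data/GunPoint/')
    return X.shape, y.shape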
# convert a raw dataframe into the vertically oriented
# format that tsfresh requires for feature extraction
def raw_to_tsfresh(X, y):
ids = []
values = []
ys = []
indices = []
for id, row in X.iterrows():
c = (y.loc[[id], :]).iloc[0][0]
ys.append(int(c))
indices.append(id)
first = True
for v in row:
if (not first):
ids.append(id)
values.append(float(v))
first = False
d = { 'id': ids, 'value': values }
return (pd.DataFrame(data=d), pd.Series(data=ys, index=indices))
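# Small sketch of the wide-to-long conversion above (toy data, illustrative):
# every row of X is unrolled into 'id'/'value' pairs for tsfresh, and the
# labels come back as a Series indexed by the original row ids.
def _raw_to_tsfresh_demo():
    X = pd.DataFrame([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    y = pd.DataFrame({0: [1, 2]})
    long_df, labels = raw_to_tsfresh(X, y)
    return long_df.columns.tolist(), labels.tolist()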
# helper function to filter features out of a dataframe given
# a calculated tsfresh relevance table (R)
def filter_features(df, R):
for id, row in R.iterrows():
if (row['relevant'] == False):
df = df.drop([row['feature']], axis=1)
return df
# calculate the accuracy rate of a prediction
def accuracy_rate(predicted, actual):
correct = 0
for p, a in zip(predicted, actual):
if (p == a):
correct += 1
return correct / len(predicted)
# a single place to configure our RFC and ADA classifiers:
def build_rfc():
return RandomForestClassifier()
def build_ada():
return AdaBoostClassifier()
# Perform the standard FRESH algorithm
def perform_fresh(X_train, y_train, X_test, y_test):
log('Processing fresh')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction and relevance tests ONLY on the train
# data set.
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
extracted_train = extracted_train.dropna(axis='columns')
# We run FRESH and its variants first at the default fdr level of 0.05,
# but if it returns 0 features (why?) then we lower the value and try
# again.
filtered_train = None
for fdr in [0.05, 0.01, 0.005, 0.001, 0.00001]:
log('Using ' + str(fdr))
R = calculate_relevance_table(extracted_train, y_train.squeeze(), fdr_level=fdr)
filtered_train = filter_features(extracted_train, R)
if (filtered_train.shape[1] > 0):
break
# Extract features from the test set, but then apply the same relevant
# features that we used from the train set
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_test = extracted_test.dropna(axis='columns')
filtered_test = filter_features(extracted_test, R)
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(filtered_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(filtered_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(filtered_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(filtered_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# Safely executes a feature-based fold run, catching any
# exceptions so that we simply ignore this failed fold. This
# was added to make FRESH and its variants more robust, as
# sometimes a single fold out of 10 in FRESH would fail as
# the algorithm (even at low fdr settings) would report zero
# relevant features
def run_safely(f, X_train, y_train, X_test, y_test):
try:
return f(X_train, y_train, X_test, y_test)
except:
return ignore_this_fold
# FRESH variant with PCA run on the extracted relevant features
def perform_fresh_pca_after(X_train, y_train, X_test, y_test):
log('Processing fresh_pca_after')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction and relevance tests ONLY on the train
# data set.
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
# For some reason, tsfresh is extracting features that contain Nan,
# Infinity or None. This breaks the PCA step. To avoid this, we
# drop columns that contain these values. I know of nothing else to do here.
extracted_train = extracted_train.dropna(axis='columns')
filtered_train = None
# execute at different fdr levels to try to make FRESH more robust
for fdr in [0.05, 0.01, 0.005, 0.001]:
R = calculate_relevance_table(extracted_train, y_train.squeeze(), fdr_level=fdr)
filtered_train = filter_features(extracted_train, R)
if (filtered_train.shape[1] > 0):
break
# Perform PCA on the filtered set of features
pca_train = PCAForPandas(n_components=0.95, svd_solver='full')
filtered_train = pca_train.fit_transform(filtered_train)
# Extract features from the test set, but then apply the same relevant
# features that we used from the train set
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_test = extracted_test.dropna(axis='columns')
filtered_test = filter_features(extracted_test, R)
filtered_test = pca_train.transform(filtered_test)
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(filtered_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(filtered_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(filtered_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(filtered_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# FRESH variant that runs PCA before the filtering step
def perform_fresh_pca_before(X_train, y_train, X_test, y_test):
log('Processing fresh_pca_before')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction and relevance tests ONLY on the train
# data set.
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
# For some reason, tsfresh is extracting features that contain Nan,
# Infinity or None. This breaks the PCA step. To avoid this, we
# drop columns that contain these values.
extracted_train = extracted_train.dropna(axis='columns')
# Perform PCA on the complete set of extracted features
pca_train = PCAForPandas(n_components=0.95, svd_solver='full')
extracted_train = pca_train.fit_transform(extracted_train)
filtered_train = extracted_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
# Extract features from the test set, but then apply the same relevant
# features that we used from the train set
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_test = extracted_test.dropna(axis='columns')
filtered_test = pca_train.transform(extracted_test)
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(filtered_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(filtered_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(filtered_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(filtered_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# The Borunta based feature-extraction algorithm
def perform_boruta(X_train, y_train, X_test, y_test):
log('Processing boruta')
rf = build_rfc()
feat_selector = BorutaPy(rf, n_estimators='auto', perc=90, verbose=2, random_state=0)
feat_selector.fit(X_train.values, y_train.values)
X_filtered = feat_selector.transform(X_train.values)
X_test_filtered = feat_selector.transform(X_test.values)
trained_model = rf.fit(X_filtered, y_train.squeeze().values)
rfc_predicted = list(map(lambda v: int(v), rf.predict(X_test_filtered)))
actual = y_test.squeeze().tolist()
bdt = build_ada()
trained_model = bdt.fit(X_filtered, y_train.squeeze().values)
ada_predicted = list(map(lambda v: int(v), bdt.predict(X_test_filtered)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(rf.estimators_),
'ada_count': len(bdt.estimators_),
}
# LDA
def perform_lda(X_train, y_train, X_test, y_test):
log('Processing lda')
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
y_test = y_test.values
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
lda = LDA()
X_train = lda.fit_transform(X_train, y_train)
X_test = lda.transform(X_test)
rf = build_rfc()
trained_model = rf.fit(X_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), rf.predict(X_test)))
actual = y_test.squeeze().tolist()
bdt = build_ada()
trained_model = bdt.fit(X_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(X_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(rf.estimators_),
'ada_count': len(bdt.estimators_),
}
# Take the extracted features from FRESH and use them unfiltered
# to make a prediction
def perform_unfiltered(X_train, y_train, X_test, y_test):
log('Processing unfiltered')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction only
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_train = extracted_train.dropna(axis='columns')
extracted_test = extracted_test.dropna(axis='columns')
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(extracted_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(extracted_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(extracted_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(extracted_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# Nearest Neighbors with Dynamic Time Warping
def perform_dtw_nn(X_train, y_train, X_test, y_test):
log('Processing dtw_nn')
m = KnnDtw(n_neighbors=1, max_warping_window=10)
m.fit(X_train.values, y_train.values)
predicted, proba = m.predict(X_test.values)
actual = y_test.squeeze().tolist()
return accuracy_rate(predicted, actual), 0
# A simple majority vote classifier
def perform_trivial(X_train, y_train, X_test, y_test):
log('Processing trivial')
counts = {}
for v in y_train:
if v not in counts:
counts[v] = 1
else:
counts[v] = counts.get(v) + 1
m = -1
majority = None
for k in counts:
v = counts.get(k)
if (v > m):
m = v
majority = k
predicted = np.full(len(y_test.squeeze().values), majority)
actual = y_test.squeeze().tolist()
return accuracy_rate(predicted, actual)
# Process a single test/train fold
def process_fold(X_train, y_train, X_test, y_test):
# Fresh and it's variants
fresh = run_safely(perform_fresh, X_train, y_train, X_test, y_test)
fresh_b = run_safely(perform_fresh_pca_before, X_train, y_train, X_test, y_test)
fresh_a = run_safely(perform_fresh_pca_after, X_train, y_train, X_test, y_test)
unfiltered = run_safely(perform_unfiltered, X_train, y_train, X_test, y_test)
# The other two feature-based approaches
boruta = run_safely(perform_boruta, X_train, y_train, X_test, y_test)
lda = run_safely(perform_lda, X_train, y_train, X_test, y_test)
# Shape based DTW_NN and the majority vote classifier
dtw = perform_dtw_nn(X_train, y_train, X_test, y_test)
trivial = perform_trivial(X_train, y_train, X_test, y_test)
return ({
'Boruta_ada': boruta.get('ada'),
'Boruta_rfc': boruta.get('rfc'),
'DTW_NN': dtw[0],
'FRESH_PCAa_ada': fresh_a.get('ada'),
'FRESH_PCAa_rfc': fresh_a.get('rfc'),
'FRESH_PCAb_ada': fresh_b.get('ada'),
'FRESH_PCAb_rfc': fresh_b.get('rfc'),
'FRESH_ada': fresh.get('ada'),
'FRESH_rfc': fresh.get('rfc'),
'LDA_ada': lda.get('ada'),
'LDA_rfc': lda.get('rfc'),
'ada': unfiltered.get('ada'),
'rfc': unfiltered.get('rfc'),
'trivial': trivial,
}, {
'Boruta_ada': boruta.get('ada_count'),
'Boruta_rfc': boruta.get('rfc_count'),
'DTW_NN': dtw[1],
'FRESH_PCAa_ada': fresh_a.get('ada_count'),
'FRESH_PCAa_rfc': fresh_a.get('rfc_count'),
'FRESH_PCAb_ada': fresh_b.get('ada_count'),
'FRESH_PCAb_rfc': fresh_b.get('rfc_count'),
'FRESH_ada': fresh.get('ada_count'),
'FRESH_rfc': fresh.get('rfc_count'),
'LDA_ada': lda.get('ada_count'),
'LDA_rfc': lda.get('rfc_count'),
'ada': unfiltered.get('ada_count'),
'rfc': unfiltered.get('rfc_count'),
'trivial': 0,
})
# Complete processing of one data set. Does 10-fold cross-validation
# extraction and classification
def process_data_set(root_path: str):
combined_X, combined_y = get_combined_raw_dataset(root_path)
skf = StratifiedKFold(n_splits=num_folds)
skf.get_n_splits(combined_X, combined_y)
total_acc = 0
results = []
fold = 1
for train_index, test_index in skf.split(combined_X, combined_y):
log('Processing fold ' + str(fold))
X_train, X_test = combined_X.iloc[train_index], combined_X.iloc[test_index]
y_train, y_test = combined_y.iloc[train_index], combined_y.iloc[test_index]
results.append(process_fold(X_train, y_train, X_test, y_test))
fold += 1
# For this dataset, averages is a map from the name of the
# pipeline (e.g. Boruta_rfc) to the average of all folds,
# similar for std_devs
averages, std_devs, counts = calc_statistics(results)
return averages, std_devs, counts
# Calculates the mean, std_dev and average counts of the
# results
def calc_statistics(results):
averages = {}
std_devs = {}
counts = {}
for k in results[0][0]:
values = []
for r in results:
f = r[0]
if (f.get(k) != -1):
values.append(f.get(k))
averages[k] = np.mean(values)
std_devs[k] = np.std(values)
for k in results[0][1]:
values = []
for r in results:
f = r[1]
if (f.get(k) != -1):
values.append(f.get(k))
counts[k] = np.mean(values)
return averages, std_devs, counts
# dump contents of array of strings to a file
def out_to_file(file: str, lines):
f = open(file, 'w')
for line in lines:
f.write(line + '\n')
f.close()
# log our progress.
def log(message):
elapsed = str(round(time.time() - mark, 0))
f = open('./log.txt', 'w+')
f.write('[' + elapsed.rjust(15, '0') + '] ' + message + '\n')
f.close()
# Output the captured results to the various tsv output files
def output_results(results):
header = 'dataset'
first = results.get(next(iter(results)))[0]
for k in first:
header = header + '\t' + k
# averages
lines = [header]
for r in results:
line = r
aves = results.get(r)[0]
for k in aves:
line = line + '\t' + str(aves.get(k))
lines.append(line)
out_to_file('./averages.tsv', lines)
# std_devs
lines = [header]
for r in results:
line = r
aves = results.get(r)[1]
for k in aves:
line = line + '\t' + str(aves.get(k))
lines.append(line)
out_to_file('./std_devs.tsv', lines)
# counts
lines = [header]
for r in results:
line = r
aves = results.get(r)[2]
for k in aves:
line = line + '\t' + str(aves.get(k))
lines.append(line)
out_to_file('./counts.tsv', lines)
def get_dataset_dirs():
return glob("./data/*/")
# builds a (X, y) DataFrame pair of a random time series with
# a binary label and specified number of samples and length
def build_random_ts(num_samples, length_of_ts):
data = {}
labels = []
for s in range (0, num_samples):
labels.append(np.random.choice([1, 2]))
data['y'] = labels
for col in range(0, length_of_ts):
key = 'feature_' + str(col + 1)
values = []
for s in range (0, num_samples):
values.append(np.random.normal())
data[key] = values
df = pd.DataFrame.from_dict(data)
X = df.iloc[:,1:]
y = df.iloc[:,:1]
return (X, y)
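# Hedged usage sketch (added; '_demo_build_random_ts' is an illustrative name,
# not original code). Confirms the shapes produced by build_random_ts: one 'y'
# label column with values in {1, 2} plus length_of_ts feature columns.
def _demo_build_random_ts():
    X_demo, y_demo = build_random_ts(num_samples=20, length_of_ts=50)
    assert X_demo.shape == (20, 50)
    assert y_demo.shape == (20, 1)
    return X_demo, y_demo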
# Dump the current snapshot of results to a given output filename
def capture_timing_result(f, results):
lines = []
for r in results:
values = results.get(r)
line = r
for v in values:
line = line + '\t' + str(v)
lines.append(line)
out_to_file(f, lines)
# Perform the full timing test first for fixed number of
# samples and then a fixed length of time series
def perform_timing_test():
log('performing timing test')
# The collection of tests that we run
tests = [
('Boruta', perform_boruta),
('DTW_NN', perform_dtw_nn),
('FRESH', perform_fresh),
('FRESH_PCAa', perform_fresh_pca_after),
('FRESH_PCAb', perform_fresh_pca_before),
('LDA', perform_lda),
('Full_X', perform_unfiltered)
]
# keep the number of samples constant
constant_samples_results = {}
for test in tests:
constant_samples_results[test[0]] = []
for length in [100, 1000, 2000]:
log('running 1000 samples and ' + str(length) + ' length')
X, y = build_random_ts(1000, length)
skf = StratifiedKFold(n_splits=10)
skf.get_n_splits(X, y)
train_index, test_index = next(skf.split(X, y))
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
for test in tests:
mark = time.time()
try:
test[1](X_train, y_train, X_test, y_test)
except Exception:
log(test[0] + ' ERROR')
constant_samples_results.get(test[0]).append(time.time() - mark)
capture_timing_result('./fixed_samples.tsv', constant_samples_results)
# keep the length constant
constant_length_results = {}
for test in tests:
constant_length_results[test[0]] = []
for num_samples in [100, 1000, 2000]:
log('running 1000 length and ' + str(num_samples) + ' samples')
X, y = build_random_ts(num_samples, 1000)
skf = StratifiedKFold(n_splits=10)
skf.get_n_splits(X, y)
train_index, test_index = next(skf.split(X, y))
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
for test in tests:
mark = time.time()
try:
test[1](X_train, y_train, X_test, y_test)
except Exception:
log(test[0] + ' ERROR')
constant_length_results.get(test[0]).append(time.time() - mark)
capture_timing_result('./fixed_length.tsv', constant_length_results)
def load_and_plot(filename, out, title, colormap, vmax):
df = pd.read_csv(filename, delimiter='\t')
datasets = df['dataset'].tolist()
algorithms = list(df.columns.values)[1:]
data = df.iloc[:,1:].values
create_heatmap(out, data, datasets, algorithms, title, colormap, vmax)
def make_colormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
def create_boxplot(data, algorithms):
fig = plt.figure(1, figsize=(9, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# rectangular box plot
bplot1 = ax.boxplot(data,
vert=True, # vertical box alignment
patch_artist=True, # fill with color
labels=algorithms) # will be used to label x-ticks
ax.set_title('Used Features')
# fill with colors
colors = ['pink', 'orange', 'darkgoldenrod', 'olive', 'green', 'lightseagreen', 'seagreen', 'lightgreen', 'deepskyblue', 'orchid', 'hotpink', 'palevioletred']
for patch, color in zip(bplot1['boxes'], colors):
patch.set_facecolor(color)
# adding horizontal grid lines
ax.yaxis.grid(True)
plt.setp(ax.get_xticklabels(), rotation=90, ha="right")
ax.set_xlabel('Algorithm')
ax.set_ylabel('Used feature counts')
plt.savefig('./results/counts.png')
def create_heatmap(out, data, row_labels, col_labels, title, colormap, vmax, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, cmap=colormap, vmin=0, vmax=vmax, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
plt.gcf().subplots_adjust(bottom=0.25)
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
ax.tick_params(axis='both', which='major', labelsize=6)
ax.tick_params(axis='both', which='minor', labelsize=6)
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=90, ha="right")
plt.title(title)
# Turn spines off and create white grid.
#for edge, spine in ax.spines.items():
# spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.6, minor=True)
ax.set_yticks( | np.arange(data.shape[0]+1) | numpy.arange |
# Python Classes/Functions containing Utility Functions for Tycho
# Keep This Class Unitless!
# ------------------------------------- #
# Python Package Importing #
# ------------------------------------- #
# Importing Necessary System Packages
import sys, os, math
import numpy as np
import matplotlib as plt
import time as tp
import hashlib
import time
import datetime
# Importing cPickle/Pickle
try:
import pickle as pickle
except:
import pickle
# Import the Amuse Base Packages
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import particle_attributes
from amuse.io import *
from amuse.lab import *
from amuse.ic.brokenimf import MultiplePartIMF
from amuse.community.sse.interface import SSE
# ------------------------------------- #
# Defining Functions #
# ------------------------------------- #
def new_seed_from_string(string):
''' Creates a seed for numpy.random.RandomState() using a string.
string: The provided string to use.
'''
hash_md5 = hashlib.md5(str(string).encode('utf-8')).hexdigest()
hash_int = ""
for c in hash_md5:
if c.isalpha():
hash_int += str(ord(c))
else:
hash_int += c
seed = int(hash_int) % (2**32 -1)
return seed
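# Hedged example (added; the helper below is illustrative, not part of Tycho).
# The hash-based seed is deterministic, so the same cluster name always
# reproduces the same NumPy random state.
def _demo_seed_from_string(name="TestCluster"):
    s1 = new_seed_from_string(name)
    s2 = new_seed_from_string(name)
    assert s1 == s2 and 0 <= s1 < 2**32 - 1
    return np.random.RandomState(s1)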
def store_ic(converter, options):
''' Creates a Structured Numpy Array to Store Initial Conditions.
converter: AMUSE NBody Converter Used in Tycho.
options: Commandline Options Set by User.
'''
ic_dtype = np.dtype({'names': ['cluster_name','seed','num_stars','num_planets','total_smass','viral_radius','w0','IBF'], \
'formats': ['S8', 'S8', 'i8', 'i8','f8','f8','f8','f4']})
ic_array = np.recarray(1, dtype=ic_dtype)
ic_array[0].cluster_name = options.cluster_name
ic_array[0].seed = options.seed
ic_array[0].num_stars = options.num_stars
ic_array[0].num_planets = options.num_psys
tsm = converter.to_si(converter.values[1]).number
vr = converter.to_si(converter.values[2]).number
ic_array[0].total_smass = tsm
ic_array[0].viral_radius = vr
ic_array[0].w0 = options.w0
#ic_array[0].IBF = options.IBF
return ic_array[0]
def preform_EulerRotation(particle_set):
''' Performs a randomly oriented Euler rotation on a set of AMUSE particles.
particle_set: AMUSE particle set on which the transform will be performed.
!! Based on <NAME>'s 1996 "Fast Random Rotation Matrices"
!! https://pdfs.semanticscholar.org/04f3/beeee1ce89b9adf17a6fabde1221a328dbad.pdf
'''
# First: Generate the three Uniformly Distributed Numbers (Two Angles, One Decimal)
n_1 = np.random.uniform(0.0, math.pi*2.0)
n_2 = np.random.uniform(0.0, math.pi*2.0)
n_3 = np.random.uniform(0.0, 1.0)
# Second: Calculate Matrix & Vector Values
c1 = np.cos(n_1)
c2 = np.cos(n_2)
s1 = np.sin(n_1)
s2 = np.sin(n_2)
r3 = np.sqrt(n_3)
R = [[ c1, s1, 0.0],
[ -s1, c1, 0.0],
[ 0.0, 0.0, 1.0]]
V = [[c2*r3],
[s2*r3],
[np.sqrt(1-n_3)]]
# Third: Create the Rotation Matrix
# This was the old rotation matrix calculation...
#rotate = (np.outer(V, V) - np.dot(np.eye(3),(R)))
# But here is the new one which more correctly implements the equations from the paper referenced above...
rotate = (2 * np.dot(np.outer(V, V), R) - np.dot(np.eye(3), R))
# Fourth: Perform the Rotation & Update the Particle
for particle in particle_set:
pos = np.matrix(([[particle.x.number], [particle.y.number], [particle.z.number]]))
vel = np.matrix(([[particle.vx.number], [particle.vy.number], [particle.vz.number]]))
particle.position = np.dot(rotate,pos) | particle.position.unit # nbody_system.length
particle.velocity = np.dot(rotate,vel) | particle.velocity.unit
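# Hedged verification sketch (added; not original Tycho code). It rebuilds the
# same Arvo-style matrix for one random draw and checks that it is a proper
# rotation: rotate.T @ rotate = I and det(rotate) = +1.
def _check_random_rotation_is_orthonormal():
    n_1 = np.random.uniform(0.0, 2.0*math.pi)
    n_2 = np.random.uniform(0.0, 2.0*math.pi)
    n_3 = np.random.uniform(0.0, 1.0)
    R = np.array([[np.cos(n_1), np.sin(n_1), 0.0],
                  [-np.sin(n_1), np.cos(n_1), 0.0],
                  [0.0, 0.0, 1.0]])
    V = np.array([np.cos(n_2)*np.sqrt(n_3), np.sin(n_2)*np.sqrt(n_3), np.sqrt(1.0 - n_3)])
    rotate = 2.0*np.dot(np.outer(V, V), R) - np.dot(np.eye(3), R)
    assert np.allclose(np.dot(rotate.T, rotate), np.eye(3))
    assert np.isclose(np.linalg.det(rotate), 1.0)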
def calc_HillRadius(a, e, m_planet, m_star):
''' Calculates the Hill Radius for a planet given a, e, and the two masses.
a: The semi-major axis of the planet's orbit.
e: The eccentricity of the planet's orbit.
m_planet: The mass of the planet.
m_star: The mass of the star.
'''
# r_Hill = a * (1 - e) * (m_planet / (3 * m_star))**(1/3)
return a*(1.0-e)*(m_planet/(3*m_star))**(1.0/3.0)
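# Hedged worked example (added): with Earth-like values a = 1 AU, e = 0.0167
# and m_planet/m_star ~ 3e-6, the formula gives roughly
# 1 * (1 - 0.0167) * (3e-6/3)**(1/3) ~ 0.0098 AU (~1.5e6 km), matching the
# commonly quoted Hill radius of the Earth.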
def calc_SnowLine(host_star):
''' Calculates the Snow Line (Ida & Lin 2005, Kennedy & Kenyon 2008)
'''
return 2.7*(host_star.mass/ (1.0 | units.MSun))**2.0 | units.AU
def calc_JovianPlacement(host_star):
''' Calculates the placement of a Jovian, scaling Jupiter's location based
on the host star's mass.
'''
a_jupiter = 5.454 | units.AU
return a_jupiter*(host_star.mass/ (1.0 | units.MSun))**2.0
def calc_PeriodRatio(planet1_a, planet2_a, mu):
period_1 = 2*np.pi* | np.sqrt(planet1_a**3/mu) | numpy.sqrt |
from scipy import integrate
import numpy as np
from quaternion_euler_utility import euler_quat, quat_euler, deriv_quat, quat_rot_mat
from numpy.linalg import norm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
from matplotlib import pyplot as plt
from scipy.spatial.transform import Rotation
"""""
QUADROTOR ENVIRONMENT
DEVELOPED BY:
<NAME>
PROGRAMA DE PÓS GRADUAÇÃO EM ENGENHARIA MECÂNICA, UNIVERSIDADE FEDERAL DO ABC
SP - SANTO ANDRÉ - BRASIL
FURTHER DOCUMENTATION ON README.MD
"""""
# matplotlib.use("pgf")
# matplotlib.rcParams.update({
# "pgf.texsystem": "pdflatex",
# 'font.family': 'serif',
# 'text.usetex': True,
# 'pgf.rcfonts': False,
# 'pgf.preamble':[
# '\DeclareUnicodeCharacter{2212}{-}']
# })
## SIMULATION BOUNDING BOXES ##
BB_POS = 5
BB_VEL = 10
BB_CONTROL = 9
BB_ANG = np.pi/2
# QUADROTOR MASS AND GRAVITY VALUE
M, G = 1.03, 9.82
# AIR DENSITY
RHO = 1.2041
#DRAG COEFFICIENT
C_D = 1.1
# ELETRIC MOTOR THRUST AND MOMENT
K_F = 1.435e-5
K_M = 2.4086e-7
I_R = 5e-5
T2WR = 2
# INERTIA MATRIX
J = np.array([[16.83e-3, 0, 0],
[0, 16.83e-3, 0],
[0, 0, 28.34e-3]])
# ELETRIC MOTOR DISTANCE TO CG
D = 0.26
#PROJECTED AREA IN X_b, Y_b, Z_b
BEAM_THICKNESS = 0.05
A_X = BEAM_THICKNESS*2*D
A_Y = BEAM_THICKNESS*2*D
A_Z = BEAM_THICKNESS*2*D*2
A = np.array([[A_X,A_Y,A_Z]]).T
## REWARD PARAMETERS ##
SOLVED_REWARD = 20
BROKEN_REWARD = -20
SHAPING_WEIGHT = 5
SHAPING_INTERNAL_WEIGHTS = [15, 4, 1]
# CONTROL REWARD PENALITIES #
P_C = 0.003
P_C_D = 0
## TARGET STEADY STATE ERROR ##
TR = [0.005, 0.01, 0.1]
TR_P = [3, 2, 1]
## ROBUST CONTROL PARAMETERS
class robust_control():
def __init__(self):
self.D_KF = 0.1
self.D_KM = 0.1
self.D_M = 0.3
self.D_IR = 0.1
self.D_J = np.ones(3) * 0.1
self.reset()
self.gust_std = [[5], [5], [2]]
self.gust_period = 500 # integration steps
self.i_gust = 0
self.gust = np.zeros([3, 1])
def reset(self):
self.episode_kf = np.random.random(4) * self.D_KF
self.episode_m = np.random.normal(0, self.D_M, 1)
self.episode_ir = np.random.random(4) * self.D_IR
self.episode_J = np.eye(3)*np.random.normal(np.zeros(3), self.D_J, [3])
def wind(self, i):
index = (i % self.gust_period) - 1
if index % self.gust_period == 0:
self.last_gust = self.gust
self.gust = np.random.normal(np.zeros([3, 1]), self.gust_std, [3, 1])
self.linear_wind_change = np.linspace(self.last_gust, self.gust, self.gust_period)
return self.linear_wind_change[index]
class quad():
def __init__(self, t_step, n, training = True, euler=0, direct_control=1, T=1, clipped = True):
""""
inputs:
t_step: integration time step
n: max timesteps
euler: flag to set the states return in euler angles, if off returns quaternions
deep learning:
deep learning flag: If on, changes the way the env. outputs data, optimizing it to deep learning use.
T: Number of past history of states/actions used as inputs in the neural network
debug: If on, prints a readable reward function, step by step, for simple reward-weight debugging.
"""
self.clipped = clipped
if training:
self.ppo_training = True
else:
self.ppo_training = False
self.mass = M
self.gravity = G
self.i = 0
self.T = T #Initial Steps
self.bb_cond = np.array([BB_VEL,
BB_VEL,
BB_VEL,
BB_ANG, BB_ANG, 3/4*np.pi,
BB_VEL*2, BB_VEL*2, BB_VEL*2]) #Bounding Box Conditions Array
if not self.ppo_training:
self.bb_cond = self.bb_cond*1
#Quadrotor states dimension
self.state_size = 13
#Quadrotor action dimension
self.action_size = 4
#Env done Flag
self.done = True
#Env Maximum Steps
self.n = n+self.T
self.t_step = t_step
#Neutral Action (used in reset and absolute action penalty)
if direct_control:
self.zero_control = np.ones(4)*(2/T2WR - 1)
else:
self.zero_control = | np.array([M*G, 0, 0, 0]) | numpy.array |
import datetime as dt
import unittest
import pandas as pd
import numpy as np
import numpy.testing as npt
import seaice.nasateam as nt
import seaice.tools.plotter.daily_extent as de
class Test_BoundingDateRange(unittest.TestCase):
def test_standard(self):
today = dt.date(2015, 9, 22)
month_bounds = (-3, 1)
expected_bounds = (dt.date(2015, 6, 1), dt.date(2015, 10, 31))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
def test_bounding_dates_overlap_year(self):
today = dt.date(2001, 1, 15)
month_bounds = (-1, 1)
expected_bounds = (dt.date(2000, 12, 1), dt.date(2001, 2, 28))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
def test_bounding_dates_overlap_leap_year(self):
today = dt.date(2016, 1, 15)
month_bounds = (-1, 1)
expected_bounds = (dt.date(2015, 12, 1), dt.date(2016, 2, 29))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
class Test_GetRecordYear(unittest.TestCase):
start_date = nt.BEGINNING_OF_SATELLITE_ERA
end_date = dt.date(2015, 12, 31)
date_index = pd.date_range(start_date, end_date)
base_series = pd.Series(index=date_index).fillna(5)
def _series(self, low=None, high=None, next_highest=None, next_lowest=None):
"""Return a series for easily testing record values. All the values are 5, with
different values set to the dates passed in as low, next_lowest, high,
and next_highest. The index of the returned series is from the beginning
of the satellite era to the end of 2015 (since that happens to be the
last complete year at the time of this writing).
"""
series = self.base_series.copy()
if high:
series[high] = 10
if next_highest:
series[next_highest] = 7
if next_lowest:
series[next_lowest] = 2
if low:
series[low] = 0
return series
def test_max(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:9/2002 , recordline:2002"""
series = self._series(high='2002-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2002
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:9/2002(min) , recordline:2002"""
series = self._series(low='2002-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2002
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_current_year_is_record(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:3/2014, recordline:2010"""
series = self._series(high='2014-03-15', next_highest='2010-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2010
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_current_year_is_record(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:3/2014(min), recordline:2010"""
series = self._series(low='2014-03-15', next_lowest='2010-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2010
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_min_record_year_is_included_in_month_bounds(self):
"""Date: 2/2015, range: 10/2014 -> 3/2015, record: 1/2014, recordline: 2013-2014"""
series = self._series(low='2014-04-20', next_lowest='1999-09-15')
date = pd.to_datetime('2015-02-15')
month_bounds = (-4, 1)
# expectation
expected = 2014
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_min_record_year_before_and_crossover_forward(self):
"""Date: 12/2015, range: 8/2015 -> 1/2016, record: 12/2014, recordline: 2014-2015"""
series = self._series(low='2014-09-20', next_lowest='1999-09-15')
date = pd.to_datetime('2015-12-15')
month_bounds = (-4, 1)
# expectation
expected = 2014
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_changeover_record_is_plotted_and_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:1/2004, recordline:2004"""
series = self._series(high='2004-01-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2004
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_changeover_record_is_plotted_and_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:1/2004(min), recordline:2003-2004"""
series = self._series(low='2004-01-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2004
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_changeover_record_is_plotted_not_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2007 , recordline:2007-2008"""
series = self._series(high='2007-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_changeover_record_is_plotted_not_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2007 , recordline:2007-2008"""
series = self._series(low='2007-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_changeover_record_is_plotted_with_current_year_plots_next_highest(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2009 , recordline:2004-2005"""
series = self._series(high='2009-11-27', next_highest='2004-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2005
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_changeover_record_is_plotted_with_current_year_plots_next_highest(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2009 , recordline:2004-2005"""
series = self._series(low='2009-11-27', next_lowest='2004-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2005
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_record_not_plotted_picks_most_months(self):
"""Date: 1/2010, range: 11/2009 -> 3/2010, record:10/2008, recordline:2007-2008"""
series = self._series(high='2008-10-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-2, 2)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_record_not_plotted_picks_most_months(self):
"""Date: 1/2010, range: 11/2009 -> 3/2010, record:8/2008, recordline:2007-2008"""
series = self._series(low='2008-08-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-2, 2)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_record_not_plotted_picks_most_months_next_highest_record(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record: 8/2009, recordline: 2008-2009 """
series = self._series(high='2009-08-27', next_highest='2004-08-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2009
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_record_not_plotted_picks_most_months_next_highest_record(self):
"""Date: 1/2010, range:10/2009 -> 2/2010, record: 8/2009, recordline: 2008-2009"""
series = self._series(low='2009-08-27', next_lowest='2004-08-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2009
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_past_record_same_year(self):
"""Date: 9/2015, range:6/2015 -> 10/2015, record: 3/2015, recordline: 2010"""
series = self._series(low='2015-03-27', next_lowest='2010-03-28')
date = pd.to_datetime('2015-09-15')
month_bounds = (-3, 1)
# expectation
expected = 2010
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_past_record_same_year_with_overlap(self):
"""Date: 9/2015, range:6/2015 -> 1/2016, record: 3/2015, recordline: 2014-2015"""
series = self._series(low='2015-03-27', next_lowest='2010-03-28')
date = pd.to_datetime('2015-09-15')
month_bounds = (-3, 4)
# expectation
expected = 2014
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_record_not_plotted_same_most_months_picks_earlier_year(self):
"""Date: 1/2010, range: 11/2009 -> 2/2010, record: 8/2008 , recordline:2008-2009"""
series = self._series(high='2008-08-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-2, 1)
# expectation
expected = 2009
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_starts_january_contains_record_month_same_year(self):
"""Date: 12/09, range: 09/2009 -> 1/2010, record: 9/2008 , recordline:2008-2009"""
series = self._series(high='2008-09-22')
date = pd.to_datetime('2009-12-15')
month_bounds = (-3, 1)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_starts_feb_contains_record_month_different_year(self):
"""Date: 1/10, range: 09/2009 -> 2/2010, record: 9/2008 , recordline:2008-2009"""
series = self._series(high='2008-09-22')
date = pd.to_datetime('2010-01-15')
month_bounds = (-4, 1)
# expectation
expected = 2009
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_record_not_plotted_same_most_months_picks_earlier_year(self):
"""Date: 1/2010, range: 11/2009 -> 2/2010, record:8/2008 , recordline:2008-2009"""
series = self._series(low='2008-08-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-2, 1)
# expectation
expected = 2009
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
class Test_YearWithMostMonthsInIndex(unittest.TestCase):
def test_longer_year_earlier(self):
index = pd.date_range(start='1999-01-01', end='2000-01-31')
actual = de._year_with_most_months_in_index(index)
expected = 1999
self.assertEqual(actual, expected)
def test_longer_year_later(self):
index = pd.date_range(start='1999-11-01', end='2000-04-29')
actual = de._year_with_most_months_in_index(index)
expected = 2000
self.assertEqual(actual, expected)
def test_earlier_year_when_equal_months(self):
index = pd.date_range(start='1999-11-01', end='2000-02-29')
actual = de._year_with_most_months_in_index(index)
expected = 1999
self.assertEqual(actual, expected)
class Test_DateIndexPrependDays(unittest.TestCase):
def test_adds_days_to_beginning_of_date_index(self):
date_index = pd.date_range(start='2005-01-05', end='2005-01-10')
days = 5
actual = de._date_index_prepend_days(date_index, days)
expected = pd.date_range(start='2004-12-31', end='2005-01-10')
self.assertTrue(actual.equals(expected))
class Test__ExtendSmoothDivide(unittest.TestCase):
def test_does_all_the_things(self):
date_index = pd.date_range(start='2000-01-06', end='2000-01-08')
nday_average = 3
divisor = 1e3
df_index = pd.Index([6, 7, 8], name='day of year')
df = pd.DataFrame({'data': [10000, 15000, 20000]}, index=df_index)
actual = de._extend_smooth_divide(df, date_index, nday_average, divisor)
# index extended
expected_index = pd.Index([3, 4, 5, 6, 7, 8])
npt.assert_array_equal(actual.index.values, expected_index.values)
# smoothed and divided
expected_data = np.array([np.nan, np.nan, np.nan, 10, 12.5, 15])
npt.assert_array_equal(actual.data.values, expected_data)
class Test_ClimatologyStatistics(unittest.TestCase):
def test_with_data_gets_average_stddevs_and_percentiles(self):
date_index = pd.date_range(start='2008-01-01', end='2008-01-10')
series1 = pd.Series([1000.0,
2000.0,
3000.0,
4000.0,
5000.0],
index=pd.date_range(start='2008-01-03', end='2008-01-07'))
series2 = pd.Series([2000.0,
3000.0,
4000.0,
5000.0,
6000.0],
index=pd.date_range(start='2009-01-03', end='2009-01-07'))
extents = series1.append(series2)
extents.name = 'total_extent_km2'
actual = de._climatology_statistics(extents, date_index,
percentiles=[0, 50, 100], nday_average=3, divisor=1e3)
expected_columns = ['climatology', 'climatology_lower', 'climatology_upper',
'percentile_0', 'percentile_50', 'percentile_100']
npt.assert_array_equal(sorted(actual.columns), sorted(expected_columns))
expected_climatology = [np.nan, np.nan, 1.5, 2., 2.5, 3.5, 4.5, 5., 5.5, np.nan]
expected_climatology_upper = [np.nan, np.nan, 2.914214, 3.414214, 3.914214, 4.914214,
5.914214, 6.414214, 6.914214, np.nan]
expected_climatology_lower = [np.nan, np.nan, 0.085786, 0.585786, 1.085786, 2.085786,
3.085786, 3.585786, 4.085786, np.nan]
npt.assert_array_equal(actual.climatology, expected_climatology)
npt.assert_array_almost_equal(actual.climatology_upper, expected_climatology_upper)
npt.assert_array_almost_equal(actual.climatology_lower, expected_climatology_lower)
expected_percentile_100 = [np.nan, np.nan, 2., 2.5, 3., 4., 5., 5.5, 6., np.nan]
npt.assert_array_equal(actual.percentile_100, expected_percentile_100)
expected_percentile_50 = [np.nan, np.nan, 1.5, 2., 2.5, 3.5, 4.5, 5., 5.5, np.nan]
| npt.assert_array_equal(actual.percentile_50, expected_percentile_50) | numpy.testing.assert_array_equal |
# DEPRECATED
from .. import settings
from .. import logging as logg
from ..preprocessing.moments import get_connectivities
from .utils import make_dense, make_unique_list, test_bimodality
import warnings
import matplotlib.pyplot as pl
from matplotlib import rcParams
import numpy as np
exp = np.exp
def log(x, eps=1e-6): # to avoid invalid values for log.
return np.log(np.clip(x, eps, 1 - eps))
def inv(x):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x_inv = 1 / x * (x != 0)
return x_inv
def unspliced(tau, u0, alpha, beta):
expu = exp(-beta * tau)
return u0 * expu + alpha / beta * (1 - expu)
def spliced(tau, s0, u0, alpha, beta, gamma):
c = (alpha - u0 * beta) * inv(gamma - beta)
expu, exps = exp(-beta * tau), exp(-gamma * tau)
return s0 * exps + alpha / gamma * (1 - exps) + c * (exps - expu)
def mRNA(tau, u0, s0, alpha, beta, gamma):
expu, exps = exp(-beta * tau), exp(-gamma * tau)
u = u0 * expu + alpha / beta * (1 - expu)
s = (
s0 * exps
+ alpha / gamma * (1 - exps)
+ (alpha - u0 * beta) * inv(gamma - beta) * (exps - expu)
)
return u, s
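# Hedged sanity check (added for illustration; not part of the original module).
# For large tau the kinetics relax to the steady state u_inf = alpha/beta and
# s_inf = alpha/gamma, which is a quick way to validate the closed-form solution.
def _check_mrna_steady_state(alpha=5.0, beta=0.5, gamma=0.3):
    u_inf, s_inf = mRNA(1e3, 0.0, 0.0, alpha, beta, gamma)
    assert np.isclose(u_inf, alpha / beta)
    assert np.isclose(s_inf, alpha / gamma)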
def vectorize(t, t_, alpha, beta, gamma=None, alpha_=0, u0=0, s0=0, sorted=False):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
o = np.array(t < t_, dtype=int)
tau = t * o + (t - t_) * (1 - o)
u0_ = unspliced(t_, u0, alpha, beta)
s0_ = spliced(t_, s0, u0, alpha, beta, gamma if gamma is not None else beta / 2)
# vectorize u0, s0 and alpha
u0 = u0 * o + u0_ * (1 - o)
s0 = s0 * o + s0_ * (1 - o)
alpha = alpha * o + alpha_ * (1 - o)
if sorted:
idx = np.argsort(t)
tau, alpha, u0, s0 = tau[idx], alpha[idx], u0[idx], s0[idx]
return tau, alpha, u0, s0
def tau_inv(u, s=None, u0=None, s0=None, alpha=None, beta=None, gamma=None):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
inv_u = (gamma >= beta) if gamma is not None else True
inv_us = np.invert(inv_u)
any_invu = np.any(inv_u) or s is None
any_invus = np.any(inv_us) and s is not None
if any_invus: # tau_inv(u, s)
beta_ = beta * inv(gamma - beta)
xinf = alpha / gamma - beta_ * (alpha / beta)
tau = -1 / gamma * log((s - beta_ * u - xinf) / (s0 - beta_ * u0 - xinf))
if any_invu: # tau_inv(u)
uinf = alpha / beta
tau_u = -1 / beta * log((u - uinf) / (u0 - uinf))
tau = tau_u * inv_u + tau * inv_us if any_invus else tau_u
return tau
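# Hedged round-trip check (added; illustrative only). In the induction phase
# (u0 = 0, alpha > 0, no spliced counts given) tau_inv should invert
# unspliced(tau) up to the clipping applied inside log().
def _check_tau_roundtrip(alpha=2.0, beta=0.8, tau=1.3):
    u_val = unspliced(tau, 0.0, alpha, beta)
    assert np.isclose(tau_inv(u_val, u0=0.0, alpha=alpha, beta=beta), tau)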
def find_swichting_time(u, s, tau, o, alpha, beta, gamma, plot=False):
off, on = o == 0, o == 1
t0_ = np.max(tau[on]) if on.sum() > 0 and np.max(tau[on]) > 0 else np.max(tau)
if off.sum() > 0:
u_, s_, tau_ = u[off], s[off], tau[off]
beta_ = beta * inv(gamma - beta)
ceta_ = alpha / gamma - beta_ * alpha / beta
x = -ceta_ * exp(-gamma * tau_)
y = s_ - beta_ * u_
exp_t0_ = (y * x).sum() / (x ** 2).sum()
if -1 < exp_t0_ < 0:
t0_ = -1 / gamma * log(exp_t0_ + 1)
if plot:
pl.scatter(x, y)
return t0_
def fit_alpha(u, s, tau, o, beta, gamma, fit_scaling=False):
off, on = o == 0, o == 1
if on.sum() > 0 or off.sum() > 0 or tau[on].min() == 0 or tau[off].min() == 0:
alpha = None
else:
tau_on, tau_off = tau[on], tau[off]
# 'on' state
expu, exps = exp(-beta * tau_on), exp(-gamma * tau_on)
# 'off' state
t0_ = np.max(tau_on)
expu_, exps_ = exp(-beta * tau_off), exp(-gamma * tau_off)
expu0_, exps0_ = exp(-beta * t0_), exp(-gamma * t0_)
# from unspliced dynamics
c_beta = 1 / beta * (1 - expu)
c_beta_ = 1 / beta * (1 - expu0_) * expu_
# from spliced dynamics
c_gamma = (1 - exps) / gamma + (exps - expu) * inv(gamma - beta)
c_gamma_ = (
(1 - exps0_) / gamma + (exps0_ - expu0_) * inv(gamma - beta)
) * exps_ - (1 - expu0_) * (exps_ - expu_) * inv(gamma - beta)
# concatenating together
c = np.concatenate([c_beta, c_gamma, c_beta_, c_gamma_]).T
x = np.concatenate([u[on], s[on], u[off], s[off]]).T
alpha = (c * x).sum() / (c ** 2).sum()
if fit_scaling: # alternatively compute alpha and scaling simultaneously
c = np.concatenate([c_gamma, c_gamma_]).T
x = np.concatenate([s[on], s[off]]).T
alpha = (c * x).sum() / (c ** 2).sum()
c = np.concatenate([c_beta, c_beta_]).T
x = np.concatenate([u[on], u[off]]).T
scaling = (c * x).sum() / (c ** 2).sum() / alpha # ~ alpha * z / alpha
return alpha, scaling
return alpha
def fit_scaling(u, t, t_, alpha, beta):
tau, alpha, u0, _ = vectorize(t, t_, alpha, beta)
ut = unspliced(tau, u0, alpha, beta)
return (u * ut).sum() / (ut ** 2).sum()
def tau_s(s, s0, u0, alpha, beta, gamma, u=None, tau=None, eps=1e-2):
if tau is None:
tau = tau_inv(u, u0=u0, alpha=alpha, beta=beta) if u is not None else 1
tau_prev, loss, n_iter, max_iter, mixed_states = 1e6, 1e6, 0, 10, np.any(alpha == 0)
b0 = (alpha - beta * u0) * inv(gamma - beta)
g0 = s0 - alpha / gamma + b0
with warnings.catch_warnings():
warnings.simplefilter("ignore")
while np.abs(tau - tau_prev).max() > eps and loss > eps and n_iter < max_iter:
tau_prev, n_iter = tau, n_iter + 1
expu, exps = b0 * exp(-beta * tau), g0 * exp(-gamma * tau)
f = exps - expu + alpha / gamma # >0
ft = -gamma * exps + beta * expu # >0 if on else <0
ftt = gamma ** 2 * exps - beta ** 2 * expu
a, b, c = ftt / 2, ft, f - s
term = b ** 2 - 4 * a * c
update = (-b + np.sqrt(term)) / (2 * a)
if mixed_states:
update = np.nan_to_num(update) * (alpha > 0) + (-c / b) * (alpha <= 0)
tau = (
np.nan_to_num(tau_prev + update) * (s != 0)
if np.any(term > 0)
else tau_prev / 10
)
loss = np.abs(
alpha / gamma + g0 * exp(-gamma * tau) - b0 * exp(-beta * tau) - s
).max()
return np.clip(tau, 0, None)
def assign_timepoints_projection(
u, s, alpha, beta, gamma, t0_=None, u0_=None, s0_=None, n_timepoints=300
):
if t0_ is None:
t0_ = tau_inv(u=u0_, u0=0, alpha=alpha, beta=beta)
if u0_ is None or s0_ is None:
u0_, s0_ = (
unspliced(t0_, 0, alpha, beta),
spliced(t0_, 0, 0, alpha, beta, gamma),
)
tpoints = np.linspace(0, t0_, num=n_timepoints)
tpoints_ = np.linspace(
0, tau_inv(np.min(u[s > 0]), u0=u0_, alpha=0, beta=beta), num=n_timepoints
)[1:]
xt = np.vstack(
[unspliced(tpoints, 0, alpha, beta), spliced(tpoints, 0, 0, alpha, beta, gamma)]
).T
xt_ = np.vstack(
[unspliced(tpoints_, u0_, 0, beta), spliced(tpoints_, s0_, u0_, 0, beta, gamma)]
).T
x_obs = np.vstack([u, s]).T
# assign time points (oth. projection onto 'on' and 'off' curve)
tau, o, diff = np.zeros(len(u)), np.zeros(len(u), dtype=int), np.zeros(len(u))
tau_alt, diff_alt = np.zeros(len(u)), np.zeros(len(u))
for i, xi in enumerate(x_obs):
diffs, diffs_ = (
np.linalg.norm((xt - xi), axis=1),
np.linalg.norm((xt_ - xi), axis=1),
)
idx, idx_ = np.argmin(diffs), np.argmin(diffs_)
o[i] = np.argmin([diffs_[idx_], diffs[idx]])
tau[i] = [tpoints_[idx_], tpoints[idx]][o[i]]
diff[i] = [diffs_[idx_], diffs[idx]][o[i]]
tau_alt[i] = [tpoints_[idx_], tpoints[idx]][1 - o[i]]
diff_alt[i] = [diffs_[idx_], diffs[idx]][1 - o[i]]
t = tau * o + (t0_ + tau) * (1 - o)
return t, tau, o
"""State-independent derivatives"""
def dtau(u, s, alpha, beta, gamma, u0, s0, du0=[0, 0, 0], ds0=[0, 0, 0, 0]):
a, b, g, gb, b0 = alpha, beta, gamma, gamma - beta, beta * inv(gamma - beta)
cu = s - a / g - b0 * (u - a / b)
c0 = s0 - a / g - b0 * (u0 - a / b)
cu += cu == 0
c0 += c0 == 0
cu_, c0_ = 1 / cu, 1 / c0
dtau_a = b0 / g * (c0_ - cu_) + 1 / g * c0_ * (ds0[0] - b0 * du0[0])
dtau_b = 1 / gb ** 2 * ((u - a / g) * cu_ - (u0 - a / g) * c0_)
dtau_c = -a / g * (1 / g ** 2 - 1 / gb ** 2) * (cu_ - c0_) - b0 / g / gb * (
u * cu_ - u0 * c0_
) # + 1/g**2 * np.log(cu/c0)
return dtau_a, dtau_b, dtau_c
def du(tau, alpha, beta, u0=0, du0=[0, 0, 0], dtau=[0, 0, 0]):
# du0 is the derivative du0 / d(alpha, beta, tau)
expu, cb = exp(-beta * tau), alpha / beta
du_a = (
du0[0] * expu + 1.0 / beta * (1 - expu) + (alpha - beta * u0) * dtau[0] * expu
)
du_b = (
du0[1] * expu
- cb / beta * (1 - expu)
+ (cb - u0) * tau * expu
+ (alpha - beta * u0) * dtau[1] * expu
)
return du_a, du_b
def ds(
tau, alpha, beta, gamma, u0=0, s0=0, du0=[0, 0, 0], ds0=[0, 0, 0, 0], dtau=[0, 0, 0]
):
# ds0 is the derivative ds0 / d(alpha, beta, gamma, tau)
expu, exps, = exp(-beta * tau), exp(-gamma * tau)
expus = exps - expu
cbu = (alpha - beta * u0) * inv(gamma - beta)
ccu = (alpha - gamma * u0) * inv(gamma - beta)
ccs = alpha / gamma - s0 - cbu
ds_a = (
ds0[0] * exps
+ 1.0 / gamma * (1 - exps)
+ 1 * inv(gamma - beta) * (1 - beta * du0[0]) * expus
+ (ccs * gamma * exps + cbu * beta * expu) * dtau[0]
)
ds_b = (
ds0[1] * exps
+ cbu * tau * expu
+ 1 * inv(gamma - beta) * (ccu - beta * du0[1]) * expus
+ (ccs * gamma * exps + cbu * beta * expu) * dtau[1]
)
ds_c = (
ds0[2] * exps
+ ccs * tau * exps
- alpha / gamma ** 2 * (1 - exps)
- cbu * inv(gamma - beta) * expus
+ (ccs * gamma * exps + cbu * beta * expu) * dtau[2]
)
return ds_a, ds_b, ds_c
def derivatives(
u, s, t, t0_, alpha, beta, gamma, scaling=1, alpha_=0, u0=0, s0=0, weights=None
):
o = np.array(t < t0_, dtype=int)
du0 = np.array(du(t0_, alpha, beta, u0))[:, None] * (1 - o)[None, :]
ds0 = np.array(ds(t0_, alpha, beta, gamma, u0, s0))[:, None] * (1 - o)[None, :]
tau, alpha, u0, s0 = vectorize(t, t0_, alpha, beta, gamma, alpha_, u0, s0)
dt = np.array(dtau(u, s, alpha, beta, gamma, u0, s0, du0, ds0))
# state-dependent derivatives:
du_a, du_b = du(tau, alpha, beta, u0, du0, dt)
du_a, du_b = du_a * scaling, du_b * scaling
ds_a, ds_b, ds_c = ds(tau, alpha, beta, gamma, u0, s0, du0, ds0, dt)
# evaluate derivative of likelihood:
ut, st = mRNA(tau, u0, s0, alpha, beta, gamma)
# udiff = np.array(ut * scaling - u)
udiff = np.array(ut - u / scaling)
sdiff = np.array(st - s)
if weights is not None:
udiff = np.multiply(udiff, weights)
sdiff = np.multiply(sdiff, weights)
dl_a = (du_a * (1 - o)).dot(udiff) + (ds_a * (1 - o)).dot(sdiff)
dl_a_ = (du_a * o).dot(udiff) + (ds_a * o).dot(sdiff)
dl_b = du_b.dot(udiff) + ds_b.dot(sdiff)
dl_c = ds_c.dot(sdiff)
dl_tau, dl_t0_ = None, None
return dl_a, dl_b, dl_c, dl_a_, dl_tau, dl_t0_
class BaseDynamics:
def __init__(self, adata=None, u=None, s=None):
self.s, self.u = s, u
zeros, zeros3 = np.zeros(adata.n_obs), np.zeros((3, 1))
self.u0, self.s0, self.u0_, self.s0_, self.t_, self.scaling = (
None,
None,
None,
None,
None,
None,
)
self.t, self.tau, self.o, self.weights = zeros, zeros, zeros, zeros
self.alpha, self.beta, self.gamma, self.alpha_, self.pars = (
None,
None,
None,
None,
None,
)
self.dpars, self.m_dpars, self.v_dpars, self.loss = zeros3, zeros3, zeros3, []
def uniform_weighting(self, n_regions=5, perc=95): # deprecated
from numpy import union1d as union
from numpy import intersect1d as intersect
u, s = self.u, self.s
u_b = np.linspace(0, np.percentile(u, perc), n_regions)
s_b = np.linspace(0, np.percentile(s, perc), n_regions)
regions, weights = {}, np.ones(len(u))
for i in range(n_regions):
if i == 0:
region = intersect(np.where(u < u_b[i + 1]), np.where(s < s_b[i + 1]))
elif i < n_regions - 1:
lower_cut = union(np.where(u > u_b[i]), np.where(s > s_b[i]))
upper_cut = intersect(
np.where(u < u_b[i + 1]), np.where(s < s_b[i + 1])
)
region = intersect(lower_cut, upper_cut)
else:
region = union(
np.where(u > u_b[i]), np.where(s > s_b[i])
) # lower_cut for last region
regions[i] = region
if len(region) > 0:
weights[region] = n_regions / len(region)
# set weights accordingly such that each region has an equal overall contribution.
self.weights = weights * len(u) / np.sum(weights)
self.u_b, self.s_b = u_b, s_b
def plot_regions(self):
u, s, ut, st = self.u, self.s, self.ut, self.st
u_b, s_b = self.u_b, self.s_b
pl.figure(dpi=100)
pl.scatter(s, u, color="grey")
pl.xlim(0)
pl.ylim(0)
pl.xlabel("spliced")
pl.ylabel("unspliced")
for i in range(len(s_b)):
pl.plot([s_b[i], s_b[i], 0], [0, u_b[i], u_b[i]])
def plot_derivatives(self):
u, s = self.u, self.s
alpha, beta, gamma = self.alpha, self.beta, self.gamma
t, tau, o, t_ = self.t, self.tau, self.o, self.t_
du0 = np.array(du(t_, alpha, beta))[:, None] * (1 - o)[None, :]
ds0 = np.array(ds(t_, alpha, beta, gamma))[:, None] * (1 - o)[None, :]
tau, alpha, u0, s0 = vectorize(t, t_, alpha, beta, gamma)
dt = np.array(dtau(u, s, alpha, beta, gamma, u0, s0))
du_a, du_b = du(tau, alpha, beta, u0=u0, du0=du0, dtau=dt)
ds_a, ds_b, ds_c = ds(
tau, alpha, beta, gamma, u0=u0, s0=s0, du0=du0, ds0=ds0, dtau=dt
)
idx = np.argsort(t)
t = np.sort(t)
pl.plot(t, du_a[idx], label=r"$\partial u / \partial\alpha$")
pl.plot(t, 0.2 * du_b[idx], label=r"$\partial u / \partial \beta$")
pl.plot(t, ds_a[idx], label=r"$\partial s / \partial \alpha$")
pl.plot(t, ds_b[idx], label=r"$\partial s / \partial \beta$")
pl.plot(t, 0.2 * ds_c[idx], label=r"$\partial s / \partial \gamma$")
pl.legend()
pl.xlabel("t")
class DynamicsRecovery(BaseDynamics):
def __init__(
self,
adata=None,
gene=None,
u=None,
s=None,
use_raw=False,
load_pars=None,
fit_scaling=False,
fit_time=True,
fit_switching=True,
fit_steady_states=True,
fit_alpha=True,
fit_connected_states=True,
):
super(DynamicsRecovery, self).__init__(adata.n_obs)
_layers = adata[:, gene].layers
self.gene = gene
self.use_raw = use_raw = use_raw or "Ms" not in _layers.keys()
# extract actual data
if u is None or s is None:
u = (
make_dense(_layers["unspliced"])
if use_raw
else make_dense(_layers["Mu"])
)
s = make_dense(_layers["spliced"]) if use_raw else make_dense(_layers["Ms"])
self.s, self.u = s, u
# set weights for fitting (exclude dropouts and extreme outliers)
nonzero = | np.ravel(s > 0) | numpy.ravel |
import keras
import sys
import os
import shutil
# Allow relative imports when being executed as script.
if __name__ == "__main__" and __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..'))
import keras_retinanet_3D.bin # noqa: F401
__package__ = "keras_retinanet_3D.bin"
from .. import models
from ..utils.image import read_image_bgr, preprocess_image, resize_image
from ..utils.visualization import draw_3d_detections_from_pose, drawdashedline, draw_detections_with_keypoints, draw_box, draw_caption
# import miscellaneous modules
import cv2
import numpy as np
import time
import scipy.io
import argparse
# set tf backend to allow memory to grow, instead of claiming everything
import tensorflow as tf
def parse_args(args):
""" Parse the arguments.
"""
parser = argparse.ArgumentParser(description='Simple script for running the network on a directory of images.')
parser.add_argument('model_path', help='Path to inference model.', type=str)
parser.add_argument('image_dir', help='Path to directory of input images.', type=str)
parser.add_argument('calib_dir', help='Path to directory of calibration files.', type=str)
parser.add_argument('plane_params_path', help='Path to .MAT file containing road planes.', type=str)
parser.add_argument('output_dir', help='Path to output directory', type=str)
parser.add_argument('--kitti', help='Include to save results in KITTI format.', action='store_true')
parser.add_argument('--save-images', help='Include to save result images.', action='store_true')
parser.add_argument('--backbone', help='The backbone of the model to load.', default='resnet50')
return parser.parse_args(args)
def get_session():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def load_calibration(calib_path, image_scale):
""" Load inverse of camera projection matrix from file.
"""
cam_id = 2
with open(calib_path, 'r') as f:
line = f.readlines()[cam_id]
key, value = line.split(':', 1)
P = np.array([float(x) for x in value.split()]).reshape((3, 4))
P = np.dot(np.array([[image_scale, 0.0, 0.0], [0.0, image_scale, 0.0], [0.0, 0.0, 1.0]]), P)
P_inv = np.linalg.pinv(P)
return (P, P_inv)
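# Hedged sanity check (added; helper name is illustrative). A Moore-Penrose
# pseudo-inverse satisfies P @ P_inv @ P = P; checking this is a quick way to
# validate the calibration that was just loaded and rescaled.
def _check_projection_pseudo_inverse(P, P_inv):
    return np.allclose(np.dot(np.dot(P, P_inv), P), P)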
def main(args=None):
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# set the modified tf session as backend in keras
keras.backend.tensorflow_backend.set_session(get_session())
# load retinanet model
model = models.load_model(args.model_path, backbone_name=args.backbone)
#print(model.summary())
# load all road planes
plane_params = scipy.io.loadmat(args.plane_params_path)['road_planes_database']
# create necessary output directories
output_dir = os.path.join(args.output_dir, os.path.basename(args.model_path)[:-3])
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
os.mkdir(output_dir)
os.mkdir(os.path.join(output_dir, 'outputs'))
os.mkdir(os.path.join(output_dir, 'outputs', 'full'))
if args.kitti:
os.mkdir(os.path.join(output_dir, 'outputs', 'kitti'))
if args.save_images:
os.mkdir(os.path.join(output_dir, 'images'))
os.mkdir(os.path.join(output_dir, 'images', 'composite'))
for j, fn in enumerate(os.listdir(args.calib_dir)):
calib_fp = os.path.join(args.calib_dir, fn)
image_fp = os.path.join(args.image_dir, fn.replace('.txt', '.png'))
# load image
raw_image = read_image_bgr(image_fp)
# preprocess image for network
image = preprocess_image(raw_image)
image, scale = resize_image(image)
# load calibration parameters
P, P_inv = load_calibration(calib_fp, scale)
# construct inputs
inputs = [np.expand_dims(image, axis=0), np.expand_dims(P_inv, axis=0), np.expand_dims(plane_params, axis=0)]
# process image
start = time.time()
# run network
boxes, dimensions, scores, labels, orientations, keypoints, keyplanes, residuals = model.predict_on_batch(inputs)[:8]
print("Image {}: frame rate: {:.2f}".format(j, 1.0 / (time.time() - start)))
# correct for image scale
boxes /= scale
P = np.dot(np.array([[1.0/scale, 0.0, 0.0], [0.0, 1.0/scale, 0.0], [0.0, 0.0, 1.0]]), P)
# select indices which have a score above the threshold
indices = np.where(scores[0, :] > 0.05)[0]
# select those scores
scores = scores[0][indices]
# find the order with which to sort the scores
max_detections = 100
scores_sort = np.argsort(-scores)[:max_detections]
# select detections
boxes = boxes[0, indices[scores_sort], :]
dimensions = dimensions[0, indices[scores_sort], :]
scores = scores[scores_sort]
labels = labels[0, indices[scores_sort]]
orientations = orientations[0, indices[scores_sort]]
keypoints = np.reshape(keypoints[0, indices[scores_sort], :, :], (-1, 12))
keyplanes = np.reshape(keyplanes[0, indices[scores_sort], :, :], (-1, 4))
residuals = residuals[0, indices[scores_sort]]
angles = np.empty_like(dimensions)
locations = | np.empty_like(dimensions) | numpy.empty_like |
"""
Prepare data for Part-GPNN model.
Need:
Node feature at different scales
Edge feature for valid edges
Adjacency matrix GT (parse graph GT)
Edge weight (corresponds to node level)
Edge label GT
"""
import os
import sys
import json
import pickle
import warnings
from collections import defaultdict
import numpy as np
import skimage.io
import cv2
import feature_model
import metadata
import torch
import torch.autograd
import torchvision.models
import vsrl_utils as vu
local = False
part_ids = {'Right Shoulder': [2],
'Left Shoulder': [5],
'Knee Right': [10],
'Knee Left': [13],
'Ankle Right': [11],
'Ankle Left': [14],
'Elbow Left': [6],
'Elbow Right': [3],
'Hand Left': [7],
'Hand Right': [4],
'Head': [0],
'Hip': [8],
'Upper Body': [2,5,6,3,7,4,0,8],
'Lower Body': [10,13,11,14,8],
'Left Arm': [5,6,7],
'Right Arm': [2,3,4],
'Left Leg': [8,10,11],
'Right Leg': [8,13,14],
'Full Body': [2,5,10,13,11,14,6,3,7,4,0,8],
}
__PART_WEIGHT_L1 = 0.1 # hand
__PART_WEIGHT_L2 = 0.3 # arm
__PART_WEIGHT_L3 = 0.5 # upper body
__PART_WEIGHT_L4 = 1.0 # human
part_weights = {'Right Shoulder': __PART_WEIGHT_L1,
'Left Shoulder': __PART_WEIGHT_L1,
'Knee Right': __PART_WEIGHT_L1,
'Knee Left': __PART_WEIGHT_L1,
'Ankle Right': __PART_WEIGHT_L1,
'Ankle Left': __PART_WEIGHT_L1,
'Elbow Left': __PART_WEIGHT_L1,
'Elbow Right': __PART_WEIGHT_L1,
'Hand Left': __PART_WEIGHT_L1,
'Hand Right': __PART_WEIGHT_L1,
'Head': __PART_WEIGHT_L1,
'Hip': __PART_WEIGHT_L1,
'Upper Body': __PART_WEIGHT_L3,
'Lower Body': __PART_WEIGHT_L3,
'Left Arm': __PART_WEIGHT_L2,
'Right Arm': __PART_WEIGHT_L2,
'Left Leg': __PART_WEIGHT_L2,
'Right Leg': __PART_WEIGHT_L2,
'Full Body': __PART_WEIGHT_L4}
part_names = list(part_ids.keys())
part_graph = {'Right Shoulder': [],
'Left Shoulder': [],
'Knee Right': [],
'Knee Left': [],
'Ankle Right': [],
'Ankle Left': [],
'Elbow Left': [],
'Elbow Right': [],
'Hand Left': [],
'Hand Right': [],
'Head': [],
'Hip': [],
'Upper Body': ['Head', 'Hip', 'Left Arm', 'Right Arm'],
'Lower Body': ['Hip', 'Left Leg', 'Right Leg'],
'Left Arm': ['Left Shoulder', 'Elbow Left', 'Hand Left'],
'Right Arm': ['Right Shoulder', 'Elbow Right', 'Hand Right'],
'Left Leg': ['Hip', 'Knee Left', 'Ankle Left'],
'Right Leg': ['Hip', 'Knee Right', 'Ankle Right'],
'Full Body': ['Upper Body', 'Lower Body']
}
def get_intersection(box1, box2):
return np.hstack((np.maximum(box1[:2], box2[:2]), np.minimum(box1[2:], box2[2:])))
def compute_area(box):
side1 = box[2]-box[0]
side2 = box[3]-box[1]
if side1 > 0 and side2 > 0:
return side1 * side2
else:
return 0.0
def compute_iou(box1, box2):
intersection_area = compute_area(get_intersection(box1, box2))
iou = intersection_area / (compute_area(box1) + compute_area(box2) - intersection_area)
return iou
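# Hedged worked example (added; not part of the original preprocessing).
# Two unit squares offset by half a side overlap in a 0.5 x 1 strip, so
# IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
def _demo_compute_iou():
    box_a = np.array([0.0, 0.0, 1.0, 1.0])
    box_b = np.array([0.5, 0.0, 1.5, 1.0])
    assert abs(compute_iou(box_a, box_b) - 1.0/3.0) < 1e-6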
def get_node_index(bbox, det_boxes, index_list):
bbox = np.array(bbox, dtype=np.float32)
max_iou = 0.5 # Use 0.5 as a threshold for evaluation
max_iou_index = -1
for i_node in index_list:
# check bbox overlap
iou = compute_iou(bbox, det_boxes[i_node])
if iou > max_iou:
max_iou = iou
max_iou_index = i_node
return max_iou_index
def combine_box(box1, box2):
return np.hstack((np.minimum(box1[:2], box2[:2]), np.maximum(box1[2:], box2[2:])))
def get_box(_box, human_boxes_all, used_human):
max_iou = 0.5
best_box = None
best_i = None
for i, box in enumerate(human_boxes_all):
if i in used_human:
continue
iou = compute_iou(_box, box)
if iou > max_iou:
max_iou = iou
best_box = box
best_i = i
return best_i, best_box
def draw_box(box, color='blue'):
x0,y0,x1,y1 = box
plt.plot([x0,x1,x1,x0,x0], [y0,y0,y1,y1,y0], c=color)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
def img_to_torch(img):
"""
input: H x W x C img iterables with range 0-255
output: C x H x W img tensor with range 0-1, normalized
"""
img = np.array(img) / 255.
img = (img - mean) / std
if len(img.shape) == 3:
img = np.expand_dims(img.transpose([2,0,1]), axis=0)
elif len(img.shape) == 4:
img = img.transpose([0,3,1,2])
elif len(img.shape) == 5:
img = img.transpose([0,1,4,2,3])
img = torch.autograd.Variable(torch.Tensor(img)).cuda()
return img
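# Hedged usage sketch (added; illustrative only, and it requires a CUDA device
# just like the rest of this script). A single HxWxC uint8 image becomes a
# normalized 1xCxHxW tensor ready for the feature network.
def _demo_img_to_torch():
    dummy = np.zeros((64, 64, 3), dtype=np.uint8)
    tensor = img_to_torch(dummy)
    assert tuple(tensor.shape) == (1, 3, 64, 64)
    return tensor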
meta_dir = os.path.join(os.path.dirname(__file__), '../../../data/vcoco_features')
if local:
img_dir = '/home/tengyu/Data/mscoco/coco'
vcoco_root = '/home/tengyu/Data/mscoco/v-coco/data'
save_data_path = '/home/tengyu/Documents/github/Part-GPNN/data/feature_resnet_tengyu2'
else:
img_dir = '/home/tengyu/dataset/mscoco/images'
checkpoint_dir = '/home/tengyu/github/Part-GPNN/data/model_resnet_noisy/finetune_resnet'
vcoco_root = '/home/tengyu/dataset/v-coco/data'
save_data_path = '/home/tengyu/github/Part-GPNN/data/feature_resnet_tengyu2'
obj_action_pair = pickle.load(open(os.path.join(os.path.dirname(__file__), 'obj_action_pairs.pkl'), 'rb'))
os.makedirs(save_data_path, exist_ok=True)
if not local:
feature_network = feature_model.Resnet152(num_classes=len(metadata.action_classes))
feature_network.cuda()
best_model_file = os.path.join(checkpoint_dir, 'model_best.pth')
checkpoint = torch.load(best_model_file)
for k in list(checkpoint['state_dict'].keys()):
if k[:7] == 'module.':
checkpoint['state_dict'][k[7:]] = checkpoint['state_dict'][k]
del checkpoint['state_dict'][k]
feature_network.load_state_dict(checkpoint['state_dict'])
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
input_h, input_w = 224, 224
part_eye = np.eye(21)
obj_eye = | np.eye(81) | numpy.eye |
from hydroDL.post import axplot, figplot
from hydroDL.new import fun
from hydroDL.app import waterQuality
import importlib
import matplotlib.pyplot as plt
from scipy.stats import gamma
import numpy as np
import random
importlib.reload(fun)
kLst = [1, 2, 5, 10, 20]
# flow duration curves
t = np.arange(365)
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
for k in kLst:
q = fun.fdc(t, k)
print(np.sum(q))
ax.plot(t, q, label='a={}'.format(k))
ax.legend()
ax.set_title('flow duration curve')
fig.show()
# kate's model
rLst = [0, 0.1, 0.2, 0.5, 1, 2]
t = np.arange(365)
fig, ax = plt.subplots(1, 1, figsize=(12, 6))
for r in rLst:
ct = fun.kate(t, r)
ax.plot(t, ct, label='tw={}'.format(r))
ax.legend()
ax.set_title('concentration with travel time')
fig.show()
# prcp
t = | np.arange('2000-01-01', '2005-01-01', dtype='datetime64[D]') | numpy.arange |
import torch
import time
import numpy as np
import torch
import open3d as o3d
from torch.utils.data import DataLoader, Dataset, ConcatDataset, random_split
from .event_utils import gen_discretized_event_volume, normalize_event_volume
from easydict import EasyDict
from tqdm import tqdm
import os
import cv2
import pdb
from scipy import ndimage
class TrainerDataset(object):
def __init__(self):
super(TrainerDataset, self).__init__()
def build_dataset(self):
classes = ["02691156",
"02828884",
"02933112",
"02958343",
"03001627",
"03211117",
"03636649",
"03691459",
"04090263",
"04256520",
"04379243",
"04401088",
"04530566"]
if self.opt.random_data:
dset_all = RandomShapeNet(class_name=classes[0])
else:
dset_all = EvShapeNet(class_name=classes[0], use_mask_input=self.opt.use_mask_input)
train_len = int(0.9 * len(dset_all))
val_len = len(dset_all) - train_len
train_dataset, val_dataset = random_split(dset_all, [train_len, val_len])
self.datasets = EasyDict()
# Create Datasets
self.datasets.dataset_train = train_dataset
self.datasets.dataset_test = val_dataset
if not self.opt.demo:
# Create dataloaders
self.datasets.dataloader_train = torch.utils.data.DataLoader(self.datasets.dataset_train,
batch_size=self.opt.batch_size,
shuffle=True,
num_workers=int(self.opt.workers))
self.datasets.dataloader_test = torch.utils.data.DataLoader(self.datasets.dataset_test,
batch_size=self.opt.batch_size_test,
shuffle=True, num_workers=int(self.opt.workers))
self.datasets.len_dataset = len(self.datasets.dataset_train)
self.datasets.len_dataset_test = len(self.datasets.dataset_test)
class EvShapeNet(Dataset):
def __init__(self, width=256,
height=256,
volume_time_slices=10,
delta_t=0.01,
mode='train',
class_name=None,
use_mask_input=False,
num_views=45,
meta_path='/Datasets/cwang/event_shapenet/shapenet_r2n2.txt',
event_folder = '/Datasets/cwang/event_shapenet_corrected_events',
gt_folder='/Datasets/cwang/event_shapenet_corrected'):
self.width = width
self.height = height
self.volume_time_slices = volume_time_slices
self.mode = mode
self.class_name = class_name
self.event_folder = event_folder
self.gt_folder = gt_folder
self.delta_t = delta_t
self.use_mask_input = use_mask_input
self.num_views = num_views
self.paths = self.read_meta(gt_folder, meta_path, class_name=class_name)
print("There are {} objects in the current dataset".format(len(self.paths)))
def read_meta(self, data_folder, meta_file, class_name=None):
classes = [c for c in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, c))]
meta_file = open(meta_file, 'r')
all_paths = []
# generate list of models
for l in meta_file.readlines():
l = l.strip("\n")
if class_name is None or class_name in l:
split_name = l.split("/")
cname = split_name[0]
oname = split_name[1]
model_path = os.path.join(cname, oname)
# TODO: hack check if the events are generated
event_path = os.path.join(self.event_folder, model_path, "events.npz")
if os.path.exists(event_path):
all_paths.append(model_path)
return all_paths
def __len__(self):
return len(self.paths)
def rotate(self, inputs, x, axis=[1, 2]):
return ndimage.rotate(inputs, x, reshape=False, axes=axis)
def __getitem__(self, index):
path = self.paths[index]
output = {}
# find events based on image time
if self.use_mask_input:
# read sil masks
masks = []
for i in range(45):
data = np.load(os.path.join(self.gt_folder, path, "{:05}_gt.npz".format(i)))
masks.append(data['sil_mask'])
network_input = np.stack(masks, axis=0).astype(np.float32)
else:
try:
event_data = dict(np.load(os.path.join(self.event_folder, path, "events.npz")))
event_volume = gen_discretized_event_volume(
torch.from_numpy(event_data['x']).long(),
torch.from_numpy(event_data['y']).long(),
torch.from_numpy(event_data['t'].astype(np.float32)),
torch.from_numpy(event_data['p']),
[self.volume_time_slices*2,
self.height,
self.width])
network_input = normalize_event_volume(event_volume).float()
except:
print("Invalid Path:", path)
model = o3d.io.read_triangle_mesh(os.path.join(self.gt_folder, path, "model.obj"))
# sample 1000 points from model
points = np.array(model.sample_points_uniformly(number_of_points=1000).points)
# normalize events and convert to event volume
# get sample points
output = {
"input_data": network_input,
"points": points.astype(np.float32)
}
return output
class RandomShapeNet(Dataset):
def __init__(self, width=256,
height=256,
volume_time_slices=10,
delta_t=0.01,
mode='train',
class_name=None,
meta_path='/Datasets/cwang/event_shapenet/shapenet_r2n2.txt',
event_folder = '/Datasets/cwang/event_shapenet_corrected_events',
gt_folder='/Datasets/cwang/event_shapenet_corrected'):
self.width = width
self.height = height
self.volume_time_slices = volume_time_slices
self.mode = mode
self.class_name = class_name
self.event_folder = event_folder
self.gt_folder = gt_folder
self.delta_t = delta_t
self.paths = self.read_meta(gt_folder, meta_path, class_name=class_name)
print("There are {} objects in the current dataset".format(len(self.paths)))
def read_meta(self, data_folder, meta_file, class_name=None):
classes = [c for c in os.listdir(data_folder) if os.path.isdir(os.path.join(data_folder, c))]
meta_file = open(meta_file, 'r')
all_paths = []
# generate list of models
for l in meta_file.readlines():
l = l.strip("\n")
if class_name is None or class_name in l:
split_name = l.split("/")
cname = split_name[0]
oname = split_name[1]
model_path = os.path.join(cname, oname)
event_path = os.path.join(self.event_folder, model_path, "events.npz")
if os.path.exists(event_path):
all_paths.append(model_path)
        meta_file.close()
        return all_paths
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
# find events based on image time
model = o3d.io.read_triangle_mesh(os.path.join(self.gt_folder, path, "model.obj"))
# sample 1000 points from model
points = np.array(model.sample_points_uniformly(number_of_points=1000).points)
# normalize events and convert to event volume
event_volume = | np.ones([self.volume_time_slices*2, self.height, self.width]) | numpy.ones |
# Copyright 2009-2011 by <NAME>. All rights reserved.
# Revisions copyright 2009-2013 by <NAME>. All rights reserved.
# Revisions copyright 2013 <NAME>. All rights reserved.
#
# Converted by <NAME> from an older unit test copyright 2002
# by <NAME>.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Unit tests for the Bio.PDB module."""
from __future__ import print_function
from copy import deepcopy
import os
import sys
import tempfile
import unittest
import warnings
from Bio._py3k import StringIO
try:
import numpy
from numpy import dot # Missing on old PyPy's micronumpy
del dot
from numpy.linalg import svd, det # Missing in PyPy 2.0 numpypy
from numpy.random import random
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NumPy if you want to use Bio.PDB.")
from Bio import BiopythonWarning
from Bio.Seq import Seq
from Bio.Alphabet import generic_protein
from Bio.PDB import PDBParser, PPBuilder, CaPPBuilder, PDBIO, Select, MMCIFParser, MMCIFIO
from Bio.PDB.MMCIF2Dict import MMCIF2Dict
from Bio.PDB import HSExposureCA, HSExposureCB, ExposureCN
from Bio.PDB.PDBExceptions import PDBConstructionException, PDBConstructionWarning
from Bio.PDB import rotmat, Vector, refmat, calc_angle, calc_dihedral, rotaxis, m2rotaxis
from Bio.PDB import Residue, Atom
from Bio.PDB import make_dssp_dict
from Bio.PDB import DSSP
from Bio.PDB.NACCESS import process_asa_data, process_rsa_data
from Bio.PDB.ResidueDepth import _get_atom_radius
# NB: the 'A_' prefix ensures this test case is run first
class A_ExceptionTest(unittest.TestCase):
"""Errors and warnings while parsing of flawed PDB files.
These tests must be executed because of the way Python's warnings module
works -- a warning is only logged the first time it is encountered.
"""
def test_1_warnings(self):
"""Check warnings: Parse a flawed PDB file in permissive mode."""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', PDBConstructionWarning)
# Trigger warnings
p = PDBParser(PERMISSIVE=True)
p.get_structure("example", "PDB/a_structure.pdb")
self.assertEqual(len(w), 14)
for wrn, msg in zip(w, [
# Expected warning messages:
"Used element 'N' for Atom (name=N) with given element ''",
"Used element 'C' for Atom (name=CA) with given element ''",
"Atom names ' CA ' and 'CA ' differ only in spaces at line 17.",
"Used element 'CA' for Atom (name=CA ) with given element ''",
'Atom N defined twice in residue <Residue ARG het= resseq=2 icode= > at line 21.',
'disordered atom found with blank altloc before line 33.',
"Residue (' ', 4, ' ') redefined at line 43.",
"Blank altlocs in duplicate residue SER (' ', 4, ' ') at line 43.",
"Residue (' ', 10, ' ') redefined at line 75.",
"Residue (' ', 14, ' ') redefined at line 106.",
"Residue (' ', 16, ' ') redefined at line 135.",
"Residue (' ', 80, ' ') redefined at line 633.",
"Residue (' ', 81, ' ') redefined at line 646.",
'Atom O defined twice in residue <Residue HOH het=W resseq=67 icode= > at line 823.'
]):
self.assertIn(msg, str(wrn))
def test_2_strict(self):
"""Check error: Parse a flawed PDB file in strict mode."""
parser = PDBParser(PERMISSIVE=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", PDBConstructionWarning)
self.assertRaises(PDBConstructionException,
parser.get_structure, "example", "PDB/a_structure.pdb")
self.assertEqual(len(w), 4, w)
def test_3_bad_xyz(self):
"""Check error: Parse an entry with bad x,y,z value."""
data = "ATOM 9 N ASP A 152 21.554 34.953 27.691 1.00 19.26 N\n"
parser = PDBParser(PERMISSIVE=False)
s = parser.get_structure("example", StringIO(data))
data = "ATOM 9 N ASP A 152 21.ish 34.953 27.691 1.00 19.26 N\n"
self.assertRaises(PDBConstructionException,
parser.get_structure, "example", StringIO(data))
def test_4_occupancy(self):
"""Parse file with missing occupancy"""
permissive = PDBParser(PERMISSIVE=True)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", PDBConstructionWarning)
structure = permissive.get_structure("test", "PDB/occupancy.pdb")
self.assertEqual(len(w), 3, w)
atoms = structure[0]['A'][(' ', 152, ' ')]
# Blank occupancy behavior set in Bio/PDB/PDBParser
self.assertEqual(atoms['N'].get_occupancy(), None)
self.assertEqual(atoms['CA'].get_occupancy(), 1.0)
self.assertEqual(atoms['C'].get_occupancy(), 0.0)
strict = PDBParser(PERMISSIVE=False)
self.assertRaises(PDBConstructionException,
strict.get_structure, "test", "PDB/occupancy.pdb")
class HeaderTests(unittest.TestCase):
"""Tests for parse_pdb_header."""
def test_capsid(self):
"""Parse the header of a known PDB file (1A8O)."""
parser = PDBParser()
struct = parser.get_structure('1A8O', 'PDB/1A8O.pdb')
self.assertAlmostEqual(struct.header['resolution'], 1.7)
# Case-insensitive string comparisons
known_strings = {
            'author': 'T.R.Gamble,S.Yoo,F.F.Vajdos,U.K.Von Schwedler,D.K.Worthylake,H.Wang,J.P.Mccutcheon,W.I.Sundquist,C.P.Hill',
'deposition_date': '1998-03-27',
'head': 'viral protein',
'journal': 'AUTH T.R.GAMBLE,S.YOO,F.F.VAJDOS,U.K.VON SCHWEDLER,AUTH 2 D.K.WORTHYLAKE,H.WANG,J.P.MCCUTCHEON,W.I.SUNDQUIST,AUTH 3 C.P.HILLTITL STRUCTURE OF THE CARBOXYL-TERMINAL DIMERIZATIONTITL 2 DOMAIN OF THE HIV-1 CAPSID PROTEIN.REF SCIENCE V. 278 849 1997REFN ISSN 0036-8075PMID 9346481DOI 10.1126/SCIENCE.278.5339.849',
'journal_reference': 't.r.gamble,s.yoo,f.f.vajdos,u.k.von schwedler, d.k.worthylake,h.wang,j.p.mccutcheon,w.i.sundquist, c.p.hill structure of the carboxyl-terminal dimerization domain of the hiv-1 capsid protein. science v. 278 849 1997 issn 0036-8075 9346481 10.1126/science.278.5339.849 ',
'keywords': 'capsid, core protein, hiv, c-terminal domain, viral protein',
'name': ' hiv capsid c-terminal domain',
'release_date': '1998-10-14',
'structure_method': 'x-ray diffraction',
}
for key, expect in known_strings.items():
self.assertEqual(struct.header[key].lower(), expect.lower())
def test_fibril(self):
"""Parse the header of another PDB file (2BEG)."""
parser = PDBParser()
struct = parser.get_structure('2BEG', 'PDB/2BEG.pdb')
known_strings = {
'author': 'T.Luhrs,C.Ritter,M.Adrian,D.Riek-Loher,B.Bohrmann,H.Dobeli,D.Schubert,R.Riek',
'deposition_date': '2005-10-24',
'head': 'protein fibril',
'journal': "AUTH T.LUHRS,C.RITTER,M.ADRIAN,D.RIEK-LOHER,B.BOHRMANN,AUTH 2 H.DOBELI,D.SCHUBERT,R.RIEKTITL 3D STRUCTURE OF ALZHEIMER'S AMYLOID-{BETA}(1-42)TITL 2 FIBRILS.REF PROC.NATL.ACAD.SCI.USA V. 102 17342 2005REFN ISSN 0027-8424PMID 16293696DOI 10.1073/PNAS.0506723102",
'journal_reference': "t.luhrs,c.ritter,m.adrian,d.riek-loher,b.bohrmann, h.dobeli,d.schubert,r.riek 3d structure of alzheimer's amyloid-{beta}(1-42) fibrils. proc.natl.acad.sci.usa v. 102 17342 2005 issn 0027-8424 16293696 10.1073/pnas.0506723102 ",
'keywords': "alzheimer's, fibril, protofilament, beta-sandwich, quenched hydrogen/deuterium exchange, pairwise mutagenesis, protein fibril",
'name': " 3d structure of alzheimer's abeta(1-42) fibrils",
'release_date': '2005-11-22',
'structure_method': 'solution nmr',
}
for key, expect in known_strings.items():
self.assertEqual(struct.header[key].lower(), expect.lower())
class ParseTest(unittest.TestCase):
def setUp(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
p = PDBParser(PERMISSIVE=1)
self.structure = p.get_structure("example", "PDB/a_structure.pdb")
def test_c_n(self):
"""Extract polypeptides using C-N."""
ppbuild = PPBuilder()
polypeptides = ppbuild.build_peptides(self.structure[1])
self.assertEqual(len(polypeptides), 1)
pp = polypeptides[0]
# Check the start and end positions
self.assertEqual(pp[0].get_id()[1], 2)
self.assertEqual(pp[-1].get_id()[1], 86)
# Check the sequence
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
self.assertEqual("RCGSQGGGSTCPGLRCCSIWGWCGDSEPYCGRTCENKCWSGER"
"SDHRCGAAVGNPPCGQDRCCSVHGWCGGGNDYCSGGNCQYRC",
str(s))
def test_ca_ca(self):
"""Extract polypeptides using CA-CA."""
ppbuild = CaPPBuilder()
polypeptides = ppbuild.build_peptides(self.structure[1])
self.assertEqual(len(polypeptides), 1)
pp = polypeptides[0]
# Check the start and end positions
self.assertEqual(pp[0].get_id()[1], 2)
self.assertEqual(pp[-1].get_id()[1], 86)
# Check the sequence
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
self.assertEqual("RCGSQGGGSTCPGLRCCSIWGWCGDSEPYCGRTCENKCWSGER"
"SDHRCGAAVGNPPCGQDRCCSVHGWCGGGNDYCSGGNCQYRC",
str(s))
def test_structure(self):
"""Verify the structure of the parsed example PDB file."""
# Structure contains 2 models
self.assertEqual(len(self.structure), 2)
# --- Checking model 0 ---
m0 = self.structure[0]
# Model 0 contains 1 chain
self.assertEqual(len(m0), 1)
# Chain 'A' contains 1 residue
self.assertEqual(len(m0['A']), 1)
# Residue ('H_PCA', 1, ' ') contains 8 atoms.
residue = m0['A'].get_list()[0]
self.assertEqual(residue.get_id(), ('H_PCA', 1, ' '))
self.assertEqual(len(residue), 9)
# --- Checking model 1 ---
m1 = self.structure[1]
# Model 1 contains 3 chains
self.assertEqual(len(m1), 3)
# Deconstruct this data structure to check each chain
chain_data = [ # chain_id, chain_len, [(residue_id, residue_len), ...]
('A', 86, [((' ', 0, ' '), 1),
((' ', 2, ' '), 11),
((' ', 3, ' '), 6, 1), # disordered
((' ', 4, ' '), 4),
((' ', 5, ' '), 6),
((' ', 6, ' '), 9),
((' ', 7, ' '), 4),
((' ', 8, ' '), 4),
((' ', 9, ' '), 4),
((' ', 10, ' '), 6, ['GLY', 'SER']), # point mut
((' ', 11, ' '), 7),
((' ', 12, ' '), 6),
((' ', 13, ' '), 7),
((' ', 14, ' '), 4, ['ALA', 'GLY']), # point mut
((' ', 15, ' '), 8, 3), # disordered
((' ', 16, ' '), 11, ['ARG', 'TRP']), # point mut
((' ', 17, ' '), 6),
((' ', 18, ' '), 6),
((' ', 19, ' '), 6),
((' ', 20, ' '), 8),
((' ', 21, ' '), 14),
((' ', 22, ' '), 4),
((' ', 23, ' '), 14),
((' ', 24, ' '), 6),
((' ', 25, ' '), 4),
((' ', 26, ' '), 8),
((' ', 27, ' '), 6),
((' ', 28, ' '), 9, 5), # disordered
((' ', 29, ' '), 7),
((' ', 30, ' '), 12),
((' ', 31, ' '), 6),
((' ', 32, ' '), 4),
((' ', 33, ' '), 11),
((' ', 34, ' '), 7),
((' ', 35, ' '), 6),
((' ', 36, ' '), 9),
((' ', 37, ' '), 8),
((' ', 38, ' '), 9),
((' ', 39, ' '), 6),
((' ', 40, ' '), 14),
((' ', 41, ' '), 6),
((' ', 42, ' '), 4),
((' ', 43, ' '), 9),
((' ', 44, ' '), 11),
((' ', 45, ' '), 6, 1), # disordered
((' ', 46, ' '), 8),
((' ', 47, ' '), 10),
((' ', 48, ' '), 11),
((' ', 49, ' '), 6),
((' ', 50, ' '), 4),
((' ', 51, ' '), 5),
((' ', 52, ' '), 5),
((' ', 53, ' '), 7),
((' ', 54, ' '), 4),
((' ', 55, ' '), 8),
((' ', 56, ' '), 7),
((' ', 57, ' '), 7),
((' ', 58, ' '), 6),
((' ', 59, ' '), 4),
((' ', 60, ' '), 9),
((' ', 61, ' '), 8),
((' ', 62, ' '), 11),
((' ', 63, ' '), 6),
((' ', 64, ' '), 6),
((' ', 65, ' '), 6),
((' ', 66, ' '), 7),
((' ', 67, ' '), 10),
((' ', 68, ' '), 4),
((' ', 69, ' '), 14),
((' ', 70, ' '), 6),
((' ', 71, ' '), 4),
((' ', 72, ' '), 4),
((' ', 73, ' '), 4),
((' ', 74, ' '), 8, 3), # disordered
((' ', 75, ' '), 8),
((' ', 76, ' '), 12),
((' ', 77, ' '), 6),
((' ', 78, ' '), 6),
((' ', 79, ' '), 4, 4), # disordered
((' ', 80, ' '), 4, ['GLY', 'SER']), # point mut
((' ', 81, ' '), 8, ['ASN', 'LYS']), # point mut
((' ', 82, ' '), 6),
((' ', 83, ' '), 9),
((' ', 84, ' '), 12),
((' ', 85, ' '), 11),
((' ', 86, ' '), 6),
]),
('B', 5, [(('W', 0, ' '), 1),
(('H_NAG', 1, ' '), 14),
(('H_NAG', 2, ' '), 14),
(('H_NAG', 4, ' '), 14),
(('H_NAG', 3, ' '), 14),
]),
(' ', 76, [(('W', 1, ' '), 1),
(('W', 2, ' '), 1),
(('W', 3, ' '), 1),
(('W', 4, ' '), 1),
(('W', 5, ' '), 1),
(('W', 6, ' '), 1),
(('W', 7, ' '), 1),
(('W', 8, ' '), 1),
(('W', 9, ' '), 1),
(('W', 10, ' '), 1),
(('W', 11, ' '), 1),
(('W', 12, ' '), 1),
(('W', 13, ' '), 1),
(('W', 14, ' '), 1),
(('W', 15, ' '), 1),
(('W', 16, ' '), 1),
(('W', 17, ' '), 1),
(('W', 18, ' '), 1),
(('W', 19, ' '), 1),
(('W', 20, ' '), 1),
(('W', 21, ' '), 1),
(('W', 22, ' '), 1),
(('W', 23, ' '), 1),
(('W', 24, ' '), 1),
(('W', 25, ' '), 1),
(('W', 26, ' '), 1),
(('W', 27, ' '), 1),
(('W', 28, ' '), 1),
(('W', 29, ' '), 1),
(('W', 30, ' '), 1),
(('W', 31, ' '), 1),
(('W', 32, ' '), 1),
(('W', 33, ' '), 1),
(('W', 34, ' '), 1),
(('W', 35, ' '), 1),
(('W', 36, ' '), 1),
(('W', 37, ' '), 1),
(('W', 38, ' '), 1),
(('W', 39, ' '), 1),
(('W', 40, ' '), 1),
(('W', 41, ' '), 1),
(('W', 42, ' '), 1),
(('W', 43, ' '), 1),
(('W', 44, ' '), 1),
(('W', 45, ' '), 1),
(('W', 46, ' '), 1),
(('W', 47, ' '), 1),
(('W', 48, ' '), 1),
(('W', 49, ' '), 1),
(('W', 50, ' '), 1),
(('W', 51, ' '), 1),
(('W', 52, ' '), 1),
(('W', 53, ' '), 1),
(('W', 54, ' '), 1),
(('W', 55, ' '), 1),
(('W', 56, ' '), 1),
(('W', 57, ' '), 1),
(('W', 58, ' '), 1),
(('W', 59, ' '), 1),
(('W', 60, ' '), 1),
(('W', 61, ' '), 1),
(('W', 62, ' '), 1),
(('W', 63, ' '), 1),
(('W', 64, ' '), 1),
(('W', 65, ' '), 1),
(('W', 66, ' '), 1),
(('W', 67, ' '), 1),
(('W', 68, ' '), 1),
(('W', 69, ' '), 1),
(('W', 70, ' '), 1),
(('W', 71, ' '), 1),
(('W', 72, ' '), 1),
(('W', 73, ' '), 1),
(('W', 74, ' '), 1),
(('W', 75, ' '), 1),
(('W', 77, ' '), 1),
])
]
for c_idx, chn in enumerate(chain_data):
# Check chain ID and length
chain = m1.get_list()[c_idx]
self.assertEqual(chain.get_id(), chn[0])
self.assertEqual(len(chain), chn[1])
for r_idx, res in enumerate(chn[2]):
residue = chain.get_list()[r_idx]
# Check residue ID and atom count
self.assertEqual(residue.get_id(), res[0])
self.assertEqual(len(residue), res[1])
disorder_lvl = residue.is_disordered()
if disorder_lvl == 1:
# Check the number of disordered atoms
disordered_count = sum(1 for atom in residue
if atom.is_disordered())
if disordered_count:
self.assertEqual(disordered_count, res[2])
elif disorder_lvl == 2:
# Point mutation -- check residue names
self.assertEqual(residue.disordered_get_id_list(), res[2])
def test_details(self):
"""Verify details of the parsed example PDB file."""
structure = self.structure
self.assertEqual(len(structure), 2)
# First model
model = structure[0]
self.assertEqual(model.id, 0)
self.assertEqual(model.level, "M")
self.assertEqual(len(model), 1)
chain = model["A"]
self.assertEqual(chain.id, "A")
self.assertEqual(chain.level, "C")
self.assertEqual(len(chain), 1)
self.assertEqual(" ".join(residue.resname for residue in chain), "PCA")
self.assertEqual(" ".join(atom.name for atom in chain.get_atoms()),
"N CA CB CG DA OE C O CA ")
self.assertEqual(" ".join(atom.element for atom in chain.get_atoms()),
"N C C C D O C O CA")
# Second model
model = structure[1]
self.assertEqual(model.id, 1)
self.assertEqual(model.level, "M")
self.assertEqual(len(model), 3)
chain = model["A"]
self.assertEqual(chain.id, "A")
self.assertEqual(chain.level, "C")
self.assertEqual(len(chain), 86)
self.assertEqual(" ".join(residue.resname for residue in chain),
"CYS ARG CYS GLY SER GLN GLY GLY GLY SER THR CYS "
"PRO GLY LEU ARG CYS CYS SER ILE TRP GLY TRP CYS "
"GLY ASP SER GLU PRO TYR CYS GLY ARG THR CYS GLU "
"ASN LYS CYS TRP SER GLY GLU ARG SER ASP HIS ARG "
"CYS GLY ALA ALA VAL GLY ASN PRO PRO CYS GLY GLN "
"ASP ARG CYS CYS SER VAL HIS GLY TRP CYS GLY GLY "
"GLY ASN ASP TYR CYS SER GLY GLY ASN CYS GLN TYR "
"ARG CYS")
self.assertEqual(" ".join(atom.name for atom in chain.get_atoms()),
"C N CA C O CB CG CD NE CZ NH1 NH2 N CA C O CB SG N "
"CA C O N CA C O CB OG N CA C O CB CG CD OE1 NE2 N CA "
"C O N CA C O N CA C O N CA C O CB OG N CA C O CB OG1 "
"CG2 N CA C O CB SG N CA C O CB CG CD N CA C O N CA C "
"O CB CG CD1 CD2 N CA C O CB CG CD NE CZ NH1 NH2 N CA "
"C O CB SG N CA C O CB SG N CA C O CB OG N CA C O CB "
"CG1 CG2 CD1 N CA C O CB CG CD1 CD2 NE1 CE2 CE3 CZ2 "
"CZ3 CH2 N CA C O N CA C O CB CG CD1 CD2 NE1 CE2 CE3 "
"CZ2 CZ3 CH2 N CA C O CB SG N CA C O N CA C O CB CG "
"OD1 OD2 N CA C O CB OG N CA C O CB CG CD OE1 OE2 N "
"CA C O CB CG CD N CA C O CB CG CD1 CD2 CE1 CE2 CZ OH "
"N CA C O CB SG N CA C O N CA C O CB CG CD NE CZ NH1 "
"NH2 N CA C O CB OG1 CG2 N CA C O CB SG N CA C O CB "
"CG CD OE1 OE2 N CA C O CB CG OD1 ND2 N CA C O CB CG "
"CD CE NZ N CA C O CB SG N CA C O CB CG CD1 CD2 NE1 "
"CE2 CE3 CZ2 CZ3 CH2 N CA C O CB OG N CA C O N CA C "
"O CB CG CD OE1 OE2 N CA C O CB CG CD NE CZ NH1 NH2 "
"N CA C O CB OG N CA C O CB CG OD1 OD2 N CA C O CB "
"CG ND1 CD2 CE1 NE2 N CA C O CB CG CD NE CZ NH1 NH2 "
"N CA C O CB SG N CA C O N CA C O CB N CA C O CB N "
"CA C O CB CG1 CG2 N CA C O N CA C O CB CG OD1 ND2 "
"N CA C O CB CG CD N CA C O CB CG CD N CA C O CB SG "
"N CA C O N CA C O CB CG CD OE1 NE2 N CA C O CB CG "
"OD1 OD2 N CA C O CB CG CD NE CZ NH1 NH2 N CA C O CB "
"SG N CA C O CB SG N CA C O CB OG N CA C O CB CG1 CG2 "
"N CA C O CB CG ND1 CD2 CE1 NE2 N CA C O N CA C O CB "
"CG CD1 CD2 NE1 CE2 CE3 CZ2 CZ3 CH2 N CA C O CB SG N "
"CA C O N CA C O N CA C O CA N C O CB CG OD1 ND2 N CA "
"C O CB CG OD1 OD2 N CA C O CB CG CD1 CD2 CE1 CE2 CZ "
"OH N CA C O CB SG N CA C O CB OG N CA C O N CA C O N "
"CA C O CB CG OD1 ND2 N CA C O CB SG N CA C O CB CG "
"CD OE1 NE2 N CA C O CB CG CD1 CD2 CE1 CE2 CZ OH N CA "
"C O CB CG CD NE CZ NH1 NH2 N CA C O CB SG")
self.assertEqual(" ".join(atom.element for atom in chain.get_atoms()),
"C N C C O C C C N C N N N C C O C S N C C O N C C O "
"C O N C C O C C C O N N C C O N C C O N C C O N C C "
"O C O N C C O C O C N C C O C S N C C O C C C N C C "
"O N C C O C C C C N C C O C C C N C N N N C C O C S "
"N C C O C S N C C O C O N C C O C C C C N C C O C C "
"C C N C C C C C N C C O N C C O C C C C N C C C C C "
"N C C O C S N C C O N C C O C C O O N C C O C O N C "
"C O C C C O O N C C O C C C N C C O C C C C C C C O "
"N C C O C S N C C O N C C O C C C N C N N N C C O C "
"O C N C C O C S N C C O C C C O O N C C O C C O N N "
"C C O C C C C N N C C O C S N C C O C C C C N C C C "
"C C N C C O C O N C C O N C C O C C C O O N C C O C "
"C C N C N N N C C O C O N C C O C C O O N C C O C C "
"N C C N N C C O C C C N C N N N C C O C S N C C O N "
"C C O C N C C O C N C C O C C C N C C O N C C O C C "
"O N N C C O C C C N C C O C C C N C C O C S N C C O "
"N C C O C C C O N N C C O C C O O N C C O C C C N C "
"N N N C C O C S N C C O C S N C C O C O N C C O C C "
"C N C C O C C N C C N N C C O N C C O C C C C N C C "
"C C C N C C O C S N C C O N C C O N C C O C N C O C "
"C O N N C C O C C O O N C C O C C C C C C C O N C C "
"O C S N C C O C O N C C O N C C O N C C O C C O N N "
"C C O C S N C C O C C C O N N C C O C C C C C C C O "
"N C C O C C C N C N N N C C O C S")
def test_pdbio_write_truncated(self):
"""Test parsing of truncated lines."""
io = PDBIO()
struct = self.structure
# Write to temp file
io.set_structure(struct)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
# Check if there are lines besides 'ATOM', 'TER' and 'END'
with open(filename, 'rU') as handle:
record_set = set(l[0:6] for l in handle)
record_set -= set(('ATOM ', 'HETATM', 'MODEL ', 'ENDMDL', 'TER\n', 'TER ', 'END\n', 'END '))
self.assertEqual(record_set, set())
finally:
os.remove(filename)
# Tests for sorting methods
def test_comparison_entities(self):
"""Test comparing and sorting the several SMCRA objects"""
struct = self.structure
        # Sorting (<, >, <=, >=)
# Chains (same code as models)
model = struct[1]
chains = [c.id for c in sorted(model)]
self.assertEqual(chains, ['A', 'B', ' '])
# Residues
residues = [r.id[1] for r in sorted(struct[1]['B'])]
self.assertEqual(residues, [1, 2, 3, 4, 0])
# Atoms
for residue in struct.get_residues():
old = [a.name for a in residue]
new = [a.name for a in sorted(residue)]
special = [a for a in ('N', 'CA', 'C', 'O') if a in old]
len_special = len(special)
# Placed N, CA, C, O first?
self.assertEqual(new[:len_special], special,
"Sorted residue did not place N, CA, C, O first: %s" % new)
# Placed everyone else alphabetically?
self.assertEqual(new[len_special:], sorted(new[len_special:]),
"After N, CA, C, O order Should be alphabetical: %s" % new)
# DisorderedResidue
residues = [r.id[1] for r in sorted(struct[1]['A'])][79:81]
self.assertEqual(residues, [80, 81])
# DisorderedAtom
atoms = [a.altloc for a in sorted(struct[1]['A'][74]['OD1'])]
self.assertEqual(atoms, ['A', 'B'])
# Comparisons
self.assertTrue(model == model) # __eq__ same type
self.assertFalse(struct[0] == struct[1])
self.assertFalse(struct[0] == []) # __eq__ diff. types
self.assertFalse(struct == model)
# In Py2 this will be True/False, in Py3 it will raise a TypeError.
try:
self.assertTrue(struct > model) # __gt__ diff. types
except TypeError:
pass
try:
self.assertFalse(struct >= []) # __le__ diff. types
except TypeError:
pass
def test_deepcopy_of_structure_with_disorder(self):
"""Test deepcopy of a structure with disordered atoms"""
structure = deepcopy(self.structure)
class ParseReal(unittest.TestCase):
"""Testing with real PDB files."""
def test_empty(self):
"""Parse an empty file."""
parser = PDBParser()
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
struct = parser.get_structure('MT', filename)
# Structure has no children (models)
self.assertFalse(len(struct))
finally:
os.remove(filename)
def test_residue_sort(self):
"""Sorting atoms in residues."""
parser = PDBParser(PERMISSIVE=False)
structure = parser.get_structure("example", "PDB/1A8O.pdb")
for residue in structure.get_residues():
old = [a.name for a in residue]
new = [a.name for a in sorted(residue)]
special = []
for a in ['N', 'CA', 'C', 'O']:
if a in old:
special.append(a)
special_len = len(special)
self.assertEqual(new[0:special_len], special,
"Sorted residue did not place N, CA, C, O first: %s" % new)
self.assertEqual(new[special_len:], sorted(new[special_len:]),
"After N, CA, C, O should be alphabet: %s" % new)
def test_c_n(self):
"""Extract polypeptides from 1A80."""
parser = PDBParser(PERMISSIVE=False)
structure = parser.get_structure("example", "PDB/1A8O.pdb")
self.assertEqual(len(structure), 1)
for ppbuild in [PPBuilder(), CaPPBuilder()]:
# ==========================================================
# First try allowing non-standard amino acids,
polypeptides = ppbuild.build_peptides(structure[0], False)
self.assertEqual(len(polypeptides), 1)
pp = polypeptides[0]
# Check the start and end positions
self.assertEqual(pp[0].get_id()[1], 151)
self.assertEqual(pp[-1].get_id()[1], 220)
# Check the sequence
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
# Here non-standard MSE are shown as M
self.assertEqual("MDIRQGPKEPFRDYVDRFYKTLRAEQASQEVKNWMTETLLVQ"
"NANPDCKTILKALGPGATLEEMMTACQG", str(s))
# ==========================================================
# Now try strict version with only standard amino acids
# Should ignore MSE 151 at start, and then break the chain
# at MSE 185, and MSE 214,215
polypeptides = ppbuild.build_peptides(structure[0], True)
self.assertEqual(len(polypeptides), 3)
# First fragment
pp = polypeptides[0]
self.assertEqual(pp[0].get_id()[1], 152)
self.assertEqual(pp[-1].get_id()[1], 184)
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
self.assertEqual("DIRQGPKEPFRDYVDRFYKTLRAEQASQEVKNW", str(s))
# Second fragment
pp = polypeptides[1]
self.assertEqual(pp[0].get_id()[1], 186)
self.assertEqual(pp[-1].get_id()[1], 213)
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
self.assertEqual("TETLLVQNANPDCKTILKALGPGATLEE", str(s))
# Third fragment
pp = polypeptides[2]
self.assertEqual(pp[0].get_id()[1], 216)
self.assertEqual(pp[-1].get_id()[1], 220)
s = pp.get_sequence()
self.assertTrue(isinstance(s, Seq))
self.assertEqual(s.alphabet, generic_protein)
self.assertEqual("TACQG", str(s))
def test_strict(self):
"""Parse 1A8O.pdb file in strict mode."""
parser = PDBParser(PERMISSIVE=False)
structure = parser.get_structure("example", "PDB/1A8O.pdb")
self.assertEqual(len(structure), 1)
model = structure[0]
self.assertEqual(model.id, 0)
self.assertEqual(model.level, "M")
self.assertEqual(len(model), 1)
chain = model["A"]
self.assertEqual(chain.id, "A")
self.assertEqual(chain.level, "C")
self.assertEqual(len(chain), 158)
self.assertEqual(" ".join(residue.resname for residue in chain),
"MSE ASP ILE ARG GLN GLY PRO LYS GLU PRO PHE ARG "
"ASP TYR VAL ASP ARG PHE TYR LYS THR LEU ARG ALA "
"GLU GLN ALA SER GLN GLU VAL LYS ASN TRP MSE THR "
"GLU THR LEU LEU VAL GLN ASN ALA ASN PRO ASP CYS "
"LYS THR ILE LEU LYS ALA LEU GLY PRO GLY ALA THR "
"LEU GLU GLU MSE MSE THR ALA CYS GLN GLY HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH HOH "
"HOH HOH")
self.assertEqual(" ".join(atom.name for atom in chain.get_atoms()),
"N CA C O CB CG SE CE N CA C O CB CG OD1 OD2 N CA "
"C O CB CG1 CG2 CD1 N CA C O CB CG CD NE CZ NH1 "
"NH2 N CA C O CB CG CD OE1 NE2 N CA C O N CA C O "
"CB CG CD N CA C O CB CG CD CE NZ N CA C O CB CG "
"CD OE1 OE2 N CA C O CB CG CD N CA C O CB CG CD1 "
"CD2 CE1 CE2 CZ N CA C O CB CG CD NE CZ NH1 NH2 N "
"CA C O CB CG OD1 OD2 N CA C O CB CG CD1 CD2 CE1 "
"CE2 CZ OH N CA C O CB CG1 CG2 N CA C O CB CG OD1 "
"OD2 N CA C O CB CG CD NE CZ NH1 NH2 N CA C O CB "
"CG CD1 CD2 CE1 CE2 CZ N CA C O CB CG CD1 CD2 CE1 "
"CE2 CZ OH N CA C O CB CG CD CE NZ N CA C O CB "
"OG1 CG2 N CA C O CB CG CD1 CD2 N CA C O CB CG CD "
"NE CZ NH1 NH2 N CA C O CB N CA C O CB CG CD OE1 "
"OE2 N CA C O CB CG CD OE1 NE2 N CA C O CB N CA C "
"O CB OG N CA C O CB CG CD OE1 NE2 N CA C O CB CG "
"CD OE1 OE2 N CA C O CB CG1 CG2 N CA C O CB CG CD "
"CE NZ N CA C O CB CG OD1 ND2 N CA C O CB CG CD1 "
"CD2 NE1 CE2 CE3 CZ2 CZ3 CH2 N CA C O CB CG SE CE "
"N CA C O CB OG1 CG2 N CA C O CB CG CD OE1 OE2 N "
"CA C O CB OG1 CG2 N CA C O CB CG CD1 CD2 N CA C "
"O CB CG CD1 CD2 N CA C O CB CG1 CG2 N CA C O CB "
"CG CD OE1 NE2 N CA C O CB CG OD1 ND2 N CA C O CB "
"N CA C O CB CG OD1 ND2 N CA C O CB CG CD N CA C "
"O CB CG OD1 OD2 N CA C O CB SG N CA C O CB CG CD "
"CE NZ N CA C O CB OG1 CG2 N CA C O CB CG1 CG2 "
"CD1 N CA C O CB CG CD1 CD2 N CA C O CB CG CD CE "
"NZ N CA C O CB N CA C O CB CG CD1 CD2 N CA C O N "
"CA C O CB CG CD N CA C O N CA C O CB N CA C O CB "
"OG1 CG2 N CA C O CB CG CD1 CD2 N CA C O CB CG CD "
"OE1 OE2 N CA C O CB CG CD OE1 OE2 N CA C O CB CG "
"SE CE N CA C O CB CG SE CE N CA C O CB OG1 CG2 N "
"CA C O CB N CA C O CB SG N CA C O CB CG CD OE1 "
"NE2 N CA C O OXT O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O O O O")
self.assertEqual(" ".join(atom.element for atom in chain.get_atoms()),
"N C C O C C SE C N C C O C C O O N C C O C C C C "
"N C C O C C C N C N N N C C O C C C O N N C C O "
"N C C O C C C N C C O C C C C N N C C O C C C O "
"O N C C O C C C N C C O C C C C C C C N C C O C "
"C C N C N N N C C O C C O O N C C O C C C C C C "
"C O N C C O C C C N C C O C C O O N C C O C C C "
"N C N N N C C O C C C C C C C N C C O C C C C C "
"C C O N C C O C C C C N N C C O C O C N C C O C "
"C C C N C C O C C C N C N N N C C O C N C C O C "
"C C O O N C C O C C C O N N C C O C N C C O C O "
"N C C O C C C O N N C C O C C C O O N C C O C C "
"C N C C O C C C C N N C C O C C O N N C C O C C "
"C C N C C C C C N C C O C C SE C N C C O C O C N "
"C C O C C C O O N C C O C O C N C C O C C C C N "
"C C O C C C C N C C O C C C N C C O C C C O N N "
"C C O C C O N N C C O C N C C O C C O N N C C O "
"C C C N C C O C C O O N C C O C S N C C O C C C "
"C N N C C O C O C N C C O C C C C N C C O C C C "
"C N C C O C C C C N N C C O C N C C O C C C C N "
"C C O N C C O C C C N C C O N C C O C N C C O C "
"O C N C C O C C C C N C C O C C C O O N C C O C "
"C C O O N C C O C C SE C N C C O C C SE C N C C "
"O C O C N C C O C N C C O C S N C C O C C C O N "
"N C C O O O O O O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O O O O "
"O O O O O O O O O O O O O O O O O O O O O")
def test_model_numbering(self):
"""Preserve model serial numbers during I/O."""
def confirm_numbering(struct):
self.assertEqual(len(struct), 3)
for idx, model in enumerate(struct):
self.assertEqual(model.serial_num, idx + 1)
self.assertEqual(model.serial_num, model.id + 1)
def confirm_single_end(fname):
"""Ensure there is only one END statement in multi-model files."""
with open(fname) as handle:
end_stment = []
for iline, line in enumerate(handle):
if line.strip() == 'END':
end_stment.append((line, iline))
self.assertEqual(len(end_stment), 1) # Only one?
self.assertEqual(end_stment[0][1], iline) # Last line of the file?
parser = PDBParser(QUIET=1)
struct1 = parser.get_structure("1lcd", "PDB/1LCD.pdb")
confirm_numbering(struct1)
# Round trip: serialize and parse again
io = PDBIO()
io.set_structure(struct1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct2 = parser.get_structure("1lcd", filename)
confirm_numbering(struct2)
confirm_single_end(filename)
finally:
os.remove(filename)
class WriteTest(unittest.TestCase):
def setUp(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
self.parser = PDBParser(PERMISSIVE=1)
self.mmcif_parser = MMCIFParser()
self.structure = self.parser.get_structure("example", "PDB/1A8O.pdb")
self.mmcif_file = "PDB/1A8O.cif"
self.mmcif_multimodel_pdb_file = "PDB/1SSU_mod.pdb"
self.mmcif_multimodel_mmcif_file = "PDB/1SSU_mod.cif"
def test_pdbio_write_structure(self):
"""Write a full structure using PDBIO."""
io = PDBIO()
struct1 = self.structure
# Write full model to temp file
io.set_structure(struct1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct2 = self.parser.get_structure("1a8o", filename)
nresidues = len(list(struct2.get_residues()))
self.assertEqual(len(struct2), 1)
self.assertEqual(nresidues, 158)
finally:
os.remove(filename)
def test_pdbio_write_residue(self):
"""Write a single residue using PDBIO"""
io = PDBIO()
struct1 = self.structure
residue1 = list(struct1.get_residues())[0]
# Write full model to temp file
io.set_structure(residue1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct2 = self.parser.get_structure("1a8o", filename)
nresidues = len(list(struct2.get_residues()))
self.assertEqual(nresidues, 1)
finally:
os.remove(filename)
def test_pdbio_write_custom_residue(self):
"""Write a chainless residue using PDBIO."""
io = PDBIO()
res = Residue.Residue((' ', 1, ' '), 'DUM', '')
atm = Atom.Atom('CA', [0.1, 0.1, 0.1], 1.0, 1.0, ' ', 'CA', 1, 'C')
res.add(atm)
# Write full model to temp file
io.set_structure(res)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct2 = self.parser.get_structure("res", filename)
latoms = list(struct2.get_atoms())
self.assertEqual(len(latoms), 1)
self.assertEqual(latoms[0].name, 'CA')
self.assertEqual(latoms[0].parent.resname, 'DUM')
self.assertEqual(latoms[0].parent.parent.id, 'A')
finally:
os.remove(filename)
def test_pdbio_select(self):
"""Write a selection of the structure using a Select subclass."""
# Selection class to filter all alpha carbons
class CAonly(Select):
"""Accepts only CA residues."""
def accept_atom(self, atom):
if atom.name == "CA" and atom.element == "C":
return 1
io = PDBIO()
struct1 = self.structure
# Write to temp file
io.set_structure(struct1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename, CAonly())
struct2 = self.parser.get_structure("1a8o", filename)
nresidues = len(list(struct2.get_residues()))
self.assertEqual(nresidues, 70)
finally:
os.remove(filename)
def test_pdbio_missing_occupancy(self):
"""Write PDB file with missing occupancy."""
io = PDBIO()
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
structure = self.parser.get_structure("test", "PDB/occupancy.pdb")
io.set_structure(structure)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", BiopythonWarning)
io.save(filename)
self.assertEqual(len(w), 1, w)
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
struct2 = self.parser.get_structure("test", filename)
atoms = struct2[0]['A'][(' ', 152, ' ')]
self.assertEqual(atoms['N'].get_occupancy(), None)
finally:
os.remove(filename)
def test_mmcifio_write_structure(self):
"""Write a full structure using MMCIFIO."""
io = MMCIFIO()
struct1 = self.structure
# Write full model to temp file
io.set_structure(struct1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct2 = self.mmcif_parser.get_structure("1a8o", filename)
nresidues = len(list(struct2.get_residues()))
self.assertEqual(len(struct2), 1)
self.assertEqual(nresidues, 158)
finally:
os.remove(filename)
def test_mmcifio_write_residue(self):
"""Write a single residue using MMCIFIO."""
io = MMCIFIO()
struct1 = self.structure
residue1 = list(struct1.get_residues())[0]
# Write full model to temp file
io.set_structure(residue1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct2 = self.mmcif_parser.get_structure("1a8o", filename)
nresidues = len(list(struct2.get_residues()))
self.assertEqual(nresidues, 1)
finally:
os.remove(filename)
def test_mmcifio_select(self):
"""Write a selection of the structure using a Select subclass."""
# Selection class to filter all alpha carbons
class CAonly(Select):
"""Accepts only CA residues."""
def accept_atom(self, atom):
if atom.name == "CA" and atom.element == "C":
return 1
io = MMCIFIO()
struct1 = self.structure
# Write to temp file
io.set_structure(struct1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename, CAonly())
struct2 = self.mmcif_parser.get_structure("1a8o", filename)
nresidues = len(list(struct2.get_residues()))
self.assertEqual(nresidues, 70)
finally:
os.remove(filename)
def test_mmcifio_write_dict(self):
"""Write an mmCIF dictionary out, read it in and compare them."""
d1 = MMCIF2Dict(self.mmcif_file)
io = MMCIFIO()
# Write to temp file
io.set_dict(d1)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
d2 = MMCIF2Dict(filename)
k1 = sorted(d1.keys())
k2 = sorted(d2.keys())
self.assertEqual(k1, k2)
for key in k1:
self.assertEqual(d1[key], d2[key])
finally:
os.remove(filename)
def test_mmcifio_multimodel(self):
"""Write a multi-model, multi-chain mmCIF file."""
pdb_struct = self.parser.get_structure("1SSU_mod_pdb", self.mmcif_multimodel_pdb_file)
mmcif_struct = self.mmcif_parser.get_structure("1SSU_mod_mmcif", self.mmcif_multimodel_mmcif_file)
io = MMCIFIO()
for struct in [pdb_struct, mmcif_struct]:
io.set_structure(struct)
filenumber, filename = tempfile.mkstemp()
os.close(filenumber)
try:
io.save(filename)
struct_in = self.mmcif_parser.get_structure("1SSU_mod_in", filename)
self.assertEqual(len(struct_in), 2)
self.assertEqual(len(struct_in[1]), 2)
self.assertEqual(round(float(struct_in[1]["B"][1]["N"].get_coord()[0]), 3), 6.259)
finally:
os.remove(filename)
class Exposure(unittest.TestCase):
"""Testing Bio.PDB.HSExposure."""
def setUp(self):
pdb_filename = "PDB/a_structure.pdb"
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
structure = PDBParser(PERMISSIVE=True).get_structure('X', pdb_filename)
self.model = structure[1]
# Look at first chain only
a_residues = list(self.model["A"].child_list)
self.assertEqual(86, len(a_residues))
self.assertEqual(a_residues[0].get_resname(), "CYS")
self.assertEqual(a_residues[1].get_resname(), "ARG")
self.assertEqual(a_residues[2].get_resname(), "CYS")
self.assertEqual(a_residues[3].get_resname(), "GLY")
# ...
self.assertEqual(a_residues[-3].get_resname(), "TYR")
self.assertEqual(a_residues[-2].get_resname(), "ARG")
self.assertEqual(a_residues[-1].get_resname(), "CYS")
self.a_residues = a_residues
self.radius = 13.0
def test_HSExposureCA(self):
"""HSExposureCA."""
hse = HSExposureCA(self.model, self.radius)
residues = self.a_residues
self.assertEqual(0, len(residues[0].xtra))
self.assertEqual(0, len(residues[1].xtra))
self.assertEqual(3, len(residues[2].xtra))
self.assertAlmostEqual(0.81250973133184456, residues[2].xtra["EXP_CB_PCB_ANGLE"])
self.assertEqual(14, residues[2].xtra["EXP_HSE_A_D"])
self.assertEqual(14, residues[2].xtra["EXP_HSE_A_U"])
self.assertEqual(3, len(residues[3].xtra))
self.assertAlmostEqual(1.3383737, residues[3].xtra["EXP_CB_PCB_ANGLE"])
self.assertEqual(13, residues[3].xtra["EXP_HSE_A_D"])
self.assertEqual(16, residues[3].xtra["EXP_HSE_A_U"])
# ...
self.assertEqual(3, len(residues[-2].xtra))
self.assertAlmostEqual(0.77124014456278489, residues[-2].xtra["EXP_CB_PCB_ANGLE"])
self.assertEqual(24, residues[-2].xtra["EXP_HSE_A_D"])
self.assertEqual(24, residues[-2].xtra["EXP_HSE_A_U"])
self.assertEqual(0, len(residues[-1].xtra))
def test_HSExposureCB(self):
"""HSExposureCB."""
hse = HSExposureCB(self.model, self.radius)
residues = self.a_residues
self.assertEqual(0, len(residues[0].xtra))
self.assertEqual(2, len(residues[1].xtra))
self.assertEqual(20, residues[1].xtra["EXP_HSE_B_D"])
self.assertEqual(5, residues[1].xtra["EXP_HSE_B_U"])
self.assertEqual(2, len(residues[2].xtra))
self.assertEqual(10, residues[2].xtra["EXP_HSE_B_D"])
self.assertEqual(18, residues[2].xtra["EXP_HSE_B_U"])
self.assertEqual(2, len(residues[3].xtra))
self.assertEqual(7, residues[3].xtra["EXP_HSE_B_D"])
self.assertEqual(22, residues[3].xtra["EXP_HSE_B_U"])
# ...
self.assertEqual(2, len(residues[-2].xtra))
self.assertEqual(14, residues[-2].xtra["EXP_HSE_B_D"])
self.assertEqual(34, residues[-2].xtra["EXP_HSE_B_U"])
self.assertEqual(2, len(residues[-1].xtra))
self.assertEqual(23, residues[-1].xtra["EXP_HSE_B_D"])
self.assertEqual(15, residues[-1].xtra["EXP_HSE_B_U"])
def test_ExposureCN(self):
"""HSExposureCN."""
hse = ExposureCN(self.model, self.radius)
residues = self.a_residues
self.assertEqual(0, len(residues[0].xtra))
self.assertEqual(1, len(residues[1].xtra))
self.assertEqual(25, residues[1].xtra["EXP_CN"])
self.assertEqual(1, len(residues[2].xtra))
self.assertEqual(28, residues[2].xtra["EXP_CN"])
self.assertEqual(1, len(residues[3].xtra))
self.assertEqual(29, residues[3].xtra["EXP_CN"])
# ...
self.assertEqual(1, len(residues[-2].xtra))
self.assertEqual(48, residues[-2].xtra["EXP_CN"])
self.assertEqual(1, len(residues[-1].xtra))
self.assertEqual(38, residues[-1].xtra["EXP_CN"])
class Atom_Element(unittest.TestCase):
"""induces Atom Element from Atom Name."""
def setUp(self):
pdb_filename = "PDB/a_structure.pdb"
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
structure = PDBParser(PERMISSIVE=True).get_structure('X', pdb_filename)
self.residue = structure[0]['A'][('H_PCA', 1, ' ')]
def test_AtomElement(self):
"""Atom Element."""
atoms = self.residue.child_list
self.assertEqual('N', atoms[0].element) # N
self.assertEqual('C', atoms[1].element) # Alpha Carbon
self.assertEqual('CA', atoms[8].element) # Calcium
self.assertEqual('D', atoms[4].element) # Deuterium
def test_ions(self):
"""Element for magnesium is assigned correctly."""
pdb_filename = "PDB/ions.pdb"
structure = PDBParser(PERMISSIVE=True).get_structure('X', pdb_filename)
# check magnesium atom
atoms = structure[0]['A'][('H_ MG', 1, ' ')].child_list
self.assertEqual('MG', atoms[0].element)
def test_hydrogens(self):
def quick_assign(fullname):
return Atom.Atom(fullname.strip(), None, None, None, None,
fullname, None).element
pdb_elements = dict(
H=(' H ', ' HA ', ' HB ', ' HD1', ' HD2', ' HE ', ' HE1', ' HE2',
' HE3', ' HG ', ' HG1', ' HH ', ' HH2', ' HZ ', ' HZ2', ' HZ3',
'1H ', '1HA ', '1HB ', '1HD ', '1HD1', '1HD2', '1HE ', '1HE2',
'1HG ', '1HG1', '1HG2', '1HH1', '1HH2', '1HZ ', '2H ', '2HA ',
'2HB ', '2HD ', '2HD1', '2HD2', '2HE ', '2HE2', '2HG ', '2HG1',
'2HG2', '2HH1', '2HH2', '2HZ ', '3H ', '3HB ', '3HD1', '3HD2',
'3HE ', '3HG1', '3HG2', '3HZ ', 'HE21'),
O=(' OH ',), # noqa: E741
C=(' CH2',),
N=(' NH1', ' NH2'),
)
for element, atom_names in pdb_elements.items():
for fullname in atom_names:
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
e = quick_assign(fullname)
# warnings.warn("%s %s" % (fullname, e))
self.assertEqual(e, element)
class IterationTests(unittest.TestCase):
def setUp(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
self.struc = PDBParser(PERMISSIVE=True).get_structure('X', "PDB/a_structure.pdb")
def test_get_chains(self):
"""Yields chains from different models separately."""
chains = [chain.id for chain in self.struc.get_chains()]
self.assertEqual(chains, ['A', 'A', 'B', ' '])
def test_get_residues(self):
"""Yields all residues from all models."""
residues = [resi.id for resi in self.struc.get_residues()]
self.assertEqual(len(residues), 168)
def test_get_atoms(self):
"""Yields all atoms from the structure, excluding duplicates and ALTLOCs which are not parsed."""
atoms = ["%12s" % str((atom.id, atom.altloc)) for atom in self.struc.get_atoms()]
self.assertEqual(len(atoms), 757)
class ChangingIdTests(unittest.TestCase):
def setUp(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
self.struc = PDBParser(PERMISSIVE=True).get_structure(
'X', "PDB/a_structure.pdb")
def test_change_model_id(self):
"""Change the id of a model."""
for model in self.struc:
break # Get first model in structure
model.id = 2
self.assertEqual(model.id, 2)
self.assertIn(2, self.struc)
self.assertNotIn(0, self.struc)
def test_change_model_id_raises(self):
"""Cannot change id to a value already in use by another child."""
model = next(iter(self.struc))
with self.assertRaises(ValueError):
model.id = 1
# Make sure nothing was changed
self.assertEqual(model.id, 0)
self.assertIn(0, self.struc)
self.assertIn(1, self.struc)
def test_change_chain_id(self):
"""Change the id of a model."""
chain = next(iter(self.struc.get_chains()))
chain.id = "R"
self.assertEqual(chain.id, "R")
model = next(iter(self.struc))
self.assertIn("R", model)
def test_change_residue_id(self):
"""Change the id of a residue."""
chain = next(iter(self.struc.get_chains()))
res = chain[('H_PCA', 1, ' ')]
res.id = (' ', 1, ' ')
self.assertEqual(res.id, (' ', 1, ' '))
self.assertIn((' ', 1, ' '), chain)
self.assertNotIn(('H_PCA', 1, ' '), chain)
self.assertEqual(chain[(' ', 1, ' ')], res)
def test_full_id_is_updated_residue(self):
"""Invalidate cached full_ids if an id is changed."""
atom = next(iter(self.struc.get_atoms()))
# Generate the original full id.
original_id = atom.get_full_id()
self.assertEqual(original_id,
('X', 0, 'A', ('H_PCA', 1, ' '), ('N', ' ')))
residue = next(iter(self.struc.get_residues()))
# Make sure the full id was in fact cached,
# so we need to invalidate it later.
self.assertEqual(residue.full_id, ('X', 0, 'A', ('H_PCA', 1, ' ')))
# Changing the residue's id should lead to an updated full id.
residue.id = (' ', 1, ' ')
new_id = atom.get_full_id()
self.assertNotEqual(original_id, new_id)
self.assertEqual(new_id, ('X', 0, 'A', (' ', 1, ' '), ('N', ' ')))
def test_full_id_is_updated_chain(self):
"""Invalidate cached full_ids if an id is changed."""
atom = next(iter(self.struc.get_atoms()))
# Generate the original full id.
original_id = atom.get_full_id()
self.assertEqual(original_id,
('X', 0, 'A', ('H_PCA', 1, ' '), ('N', ' ')))
residue = next(iter(self.struc.get_residues()))
# Make sure the full id was in fact cached,
# so we need to invalidate it later.
self.assertEqual(residue.full_id, ('X', 0, 'A', ('H_PCA', 1, ' ')))
chain = next(iter(self.struc.get_chains()))
# Changing the chain's id should lead to an updated full id.
chain.id = 'Q'
new_id = atom.get_full_id()
self.assertNotEqual(original_id, new_id)
self.assertEqual(new_id, ('X', 0, 'Q', ('H_PCA', 1, ' '), ('N', ' ')))
# class RenumberTests(unittest.TestCase):
# """Tests renumbering of structures."""
#
# def setUp(self):
# pdb_filename = "PDB/1A8O.pdb"
# self.structure=PDBParser(PERMISSIVE=True).get_structure('X', pdb_filename)
#
# def test_renumber_residues(self):
# """Residues in a structure are renumbered."""
# self.structure.renumber_residues()
# nums = [resi.id[1] for resi in self.structure[0]['A'].child_list]
# print(nums)
#
# -------------------------------------------------------------
class TransformTests(unittest.TestCase):
def setUp(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", PDBConstructionWarning)
self.s = PDBParser(PERMISSIVE=True).get_structure(
'X', "PDB/a_structure.pdb")
self.m = self.s.get_list()[0]
self.c = self.m.get_list()[0]
self.r = self.c.get_list()[0]
self.a = self.r.get_list()[0]
def get_total_pos(self, o):
"""Sum of positions of atoms in an entity along with the number of atoms."""
if hasattr(o, "get_coord"):
return o.get_coord(), 1
total_pos = numpy.array((0.0, 0.0, 0.0))
total_count = 0
for p in o.get_list():
pos, count = self.get_total_pos(p)
total_pos += pos
total_count += count
return total_pos, total_count
def get_pos(self, o):
"""Average atom position in an entity."""
pos, count = self.get_total_pos(o)
return 1.0 * pos / count
def test_transform(self):
"""Transform entities (rotation and translation)."""
for o in (self.s, self.m, self.c, self.r, self.a):
rotation = rotmat(Vector(1, 3, 5), Vector(1, 0, 0))
translation = numpy.array((2.4, 0, 1), 'f')
oldpos = self.get_pos(o)
o.transform(rotation, translation)
newpos = self.get_pos(o)
newpos_check = numpy.dot(oldpos, rotation) + translation
for i in range(0, 3):
self.assertAlmostEqual(newpos[i], newpos_check[i])
def test_Vector(self):
"""Test Vector object."""
v1 = Vector(0, 0, 1)
v2 = Vector(0, 0, 0)
v3 = Vector(0, 1, 0)
v4 = Vector(1, 1, 0)
self.assertEqual(calc_angle(v1, v2, v3), 1.5707963267948966)
self.assertEqual(calc_dihedral(v1, v2, v3, v4), 1.5707963267948966)
self.assertTrue(numpy.array_equal((v1 - v2).get_array(), numpy.array([0.0, 0.0, 1.0])))
self.assertTrue(numpy.array_equal((v1 - 1).get_array(), numpy.array([-1.0, -1.0, 0.0])))
self.assertTrue(numpy.array_equal((v1 - (1, 2, 3)).get_array(), numpy.array([-1.0, -2.0, -2.0])))
self.assertTrue(numpy.array_equal((v1 + v2).get_array(), | numpy.array([0.0, 0.0, 1.0]) | numpy.array |
'''
This file contains functions for pruning resnet-like model in layer level
1. prune_resconv_layer (resnet: conv layers)
2. prune_resnet_lconv_layer (resnet: lconv means identity layer)
3. prune_rbconv_by_indices (resnet: rbconv means right path's bottom layer)
4. prune_rbconv_by_number (resnet: used when you prune lconv but next block/layer cannot absorb your effect)
5. prune_ruconv1_layer (resnet: for resnet normal conv1 layers (i.e. right path's first upper layers))
6. prune_ruconv2_layer (resnet: for resnet normal conv2 layers (i.e. right path's second upper layers))
Author: xuhuahuang as intern in YouTu 07/2018
'''
import torch
from torch.autograd import Variable
from torchvision import models
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in DataLoader
# OpenCL may be enabled by default in OpenCV3;
# disable it because it's not thread safe and causes unwanted GPU memory allocations
cv2.ocl.setUseOpenCL(False)
import sys
import numpy as np
from models.resnet import BasicBlock, Bottleneck
def replace_layers(model, i, indexes, layers):
if i in indexes:
# layers and indexes store new layers used to update old layers
return layers[indexes.index(i)]
# if i not in indexes, use old layers
return model[i]
# helper function
'''
Helper function for updating immediate following layer/block's input channels
Args:
model: model after pruning current layer/block
layer_index: current layer index. Locate the block/layer being pruned filters NOW
filters_to_prune: the output channels indices being pruned
**Note**
Does not handle the case described by prune_rbconv_by_number()
Does not handle the cases inside prune_ruconv1_layer() and prune_ruconv2_layer() because they operate within the same block
'''
def update_next_layers(model, layer_index, filters_to_prune):
# only need to change in_channels for all following objects based on filters_to_prune
next_conv = None
next_blk = None
next_ds = None # if next one is a block, and this block has downsample path, you need to update both residual and downsample path
offset = 1
# search for the next conv, based on current conv with id = (layer_index, filter_index)
while layer_index + offset < len(model.base._modules.items()):
res = list(model.base._modules.items())[layer_index+offset] # name, module
if isinstance(res[1], torch.nn.modules.conv.Conv2d):
next_name, next_conv = res
next_is_block = False
break
        elif isinstance(res[1], (BasicBlock, Bottleneck)):
            next_is_block = True
            next_blk = res[1]
            next_conv = res[1].conv1
            next_ds = res[1].downsample  # None when the block has no downsample path
            break
offset = offset + 1
if next_conv is None:
print("No filter will be prunned for this layer (last layer)")
return model
if len(filters_to_prune) == 0:
print("No filter will be prunned for this layer")
return model
cut = len(filters_to_prune)
# next_conv must exists
next_new_conv = \
torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
out_channels = next_conv.out_channels, \
kernel_size = next_conv.kernel_size, \
stride = next_conv.stride,
padding = next_conv.padding,
dilation = next_conv.dilation,
groups = next_conv.groups,
bias = next_conv.bias is not None)
old_weights = next_conv.weight.data.cpu().numpy()
new_weights = next_new_conv.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if next_conv.bias is not None:
next_new_conv.bias.data = next_conv.bias.data
# next_ds exists or not is okay, no matter next_is_block is True or not
if next_ds is not None:
old_conv_in_next_ds = next_ds[0]
new_conv_in_next_new_ds = \
torch.nn.Conv2d(in_channels = old_conv_in_next_ds.in_channels - cut,\
out_channels = old_conv_in_next_ds.out_channels, \
kernel_size = old_conv_in_next_ds.kernel_size, \
stride = old_conv_in_next_ds.stride,
padding = old_conv_in_next_ds.padding,
dilation = old_conv_in_next_ds.dilation,
groups = old_conv_in_next_ds.groups,
bias = old_conv_in_next_ds.bias is not None)
old_weights = old_conv_in_next_ds.weight.data.cpu().numpy()
new_weights = new_conv_in_next_new_ds.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
new_conv_in_next_new_ds.weight.data = torch.from_numpy(new_weights).cuda()
if old_conv_in_next_ds.bias is not None:
new_conv_in_next_new_ds.bias.data = old_conv_in_next_ds.bias.data # bias won't change
next_new_ds = torch.nn.Sequential(new_conv_in_next_new_ds, next_ds[1]) # BN keeps unchanged
else:
next_new_ds = None
# next_new_ds and next_new_conv are ready now, create a next_new_block for replace_layers()
if next_is_block: #same as next_blk is not None:
if isinstance(next_blk, BasicBlock):
            # rely on conv1 of old block to get in_planes, out_planes, stride
next_new_block = BasicBlock(next_blk.conv1.in_channels - cut, \
next_blk.conv1.out_channels, next_blk.stride, downsample = next_new_ds)
next_new_block.conv1 = next_new_conv # only update in_channels
next_new_block.bn1 = next_blk.bn1
next_new_block.relu = next_blk.relu
next_new_block.conv2 = next_blk.conv2
next_new_block.bn2 = next_blk.bn2
else:
next_new_block = Bottleneck(next_blk.conv1.in_channels - cut, \
next_blk.conv1.out_channels, next_blk.stride, downsample = next_new_ds)
next_new_block.conv1 = next_new_conv # only update in_channels
next_new_block.bn1 = next_blk.bn1
next_new_block.conv2 = next_blk.conv2
next_new_block.bn2 = next_blk.bn2
next_new_block.conv3 = next_blk.conv3
next_new_block.bn3 = next_blk.bn3
next_new_block.relu = next_blk.relu
if not next_is_block:
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [layer_index+offset], \
[next_new_conv]) for i, _ in enumerate(model.base)))
else:
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [layer_index+offset], \
[next_new_block]) for i, _ in enumerate(model.base)))
del model.base # delete and replace with brand new one
model.base = base
print("Finished update next layers.")
return model
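# Usage sketch (illustrative; assumes the models.resnet layout used above, i.e. the
# backbone lives in model.base and every standalone conv is followed by a BatchNorm2d).
# The helper below simply drives prune_resconv_layer over all plain Conv2d layers.
def example_prune_plain_convs(model, cut_ratio=0.2):
    """Apply prune_resconv_layer to every standalone Conv2d in model.base."""
    conv_indices = [i for i, (_, m) in enumerate(model.base._modules.items())
                    if isinstance(m, torch.nn.modules.conv.Conv2d)]
    for idx in conv_indices:
        # Pruning replaces modules in place, so the collected indices stay valid.
        model = prune_resconv_layer(model, idx, cut_ratio=cut_ratio, use_bn=True)
    return model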
'''
--------------------------------------------------------------------------------
1. Prune conv layers in resnet with/without BN (only support layers stored in model.base for now)
Args:
model: model for pruning
layer_index: index the pruned layer's location within model
    cut_ratio: the ratio of filters to prune from this layer (e.g. 0.2 - prune the 20% of filters with the smallest weights)
Adapted from: https://github.com/jacobgil/pytorch-pruning
'''
def prune_resconv_layer(model, layer_index, cut_ratio=0.2, use_bn = True):
_, conv = list(model.base._modules.items())[layer_index]
if use_bn:
_, old_bn = list(model.base._modules.items())[layer_index + 1]
next_conv = None
offset = 1
# search for the next conv, based on current conv with id = (layer_index, filter_index)
while layer_index + offset < len(model.base._modules.items()):
res = list(model.base._modules.items())[layer_index+offset] # name, module
if isinstance(res[1], torch.nn.modules.conv.Conv2d):
next_name, next_conv = res
break
elif isinstance(res[1], (BasicBlock, Bottleneck)):
next_conv = res[1].conv1
break
offset = offset + 1
if next_conv is None:
print("No filter will be prunned for this layer (last layer)")
return model
num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
# skip the layer with only one filter left
if num_filters <= 1:
print("No filter will be prunned for this layer (num_filters<=1)")
return model
cut = int(cut_ratio * num_filters)
if cut < 1:
print("No filter will be prunned for this layer (cut<1)")
return model
if (num_filters - cut) < 1:
print("No filter will be prunned for this layer (no filter left after cutting)")
return model
# rank the filters within this layer and store into filter_ranks
abs_wgt = torch.abs(conv.weight.data)
values = \
torch.sum(abs_wgt, dim = 1, keepdim = True).\
sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
    # Normalize the summed weights by the filter dimensions (in_channels x kernel_h x kernel_w)
values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
print("Ranking filters.. ")
filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
print("Filters that will be prunned", filters_to_prune)
print("Pruning filters.. ")
# the updated conv for current conv, with cut output channels being pruned
new_conv = \
torch.nn.Conv2d(in_channels = conv.in_channels, \
out_channels = conv.out_channels - cut,
kernel_size = conv.kernel_size, \
stride = conv.stride,
padding = conv.padding,
dilation = conv.dilation,
groups = conv.groups,
bias = conv.bias is not None) #(out_channels)
old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1]
new_weights = new_conv.weight.data.cpu().numpy()
# skip that filter's weight inside old_weights and store others into new_weights
new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if conv.bias is not None: # no bias for conv layers
bias_numpy = conv.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune, axis = None)
new_conv.bias.data = torch.from_numpy(bias).cuda()
# BatchNorm modification
# TODO: Extract this function outside as a separate func.
if use_bn:
new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, \
eps=old_bn.eps, momentum=old_bn.momentum, affine=old_bn.affine)
# old_bn.affine == True, need to copy learning gamma and beta to new_bn
# gamma: size = (num_features)
old_weights = old_bn.weight.data.cpu().numpy()
new_weights = new_bn.weight.data.cpu().numpy()
        new_weights = np.delete(old_weights, filters_to_prune)
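        # new_weights now holds only the gamma values of the surviving filters; the
        # matching beta, running_mean and running_var entries would be pruned with the
        # same filters_to_prune index mask.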
# %% [markdown]
## Imports
# %%
# Data Processing
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams["font.size"] = 12
plt.rcParams["axes.labelsize"] = 'x-large'
from matplotlib.collections import LineCollection
import scipy as scp
from scipy import interpolate
import numpy as np
import seaborn as sns
from sklearn.preprocessing import normalize
# General
import os
import simplejson as json
import time
import copy
# from process_data import interface_dfs
# targetbarclicks = ['panel.click','arrow.click','drag.click','targetdrag.click','target.click']
# targetbarprs = ['panel.press/release','arrow.press/release','drag.press/release','targetdrag.press/release', 'targetdrag.press/release']
targetbarclicks = ['panel-click','arrow-click','drag-click','targetdrag-click','target-click']
targetbarprs = ['panel-press/release','arrow-press/release','drag-press/release','targetdrag-press/release', 'targetdrag-press/release']
#targetbarnames = ['arrow.click','drag.click','panel.click','target.click','targetdrag.click','arrow.p/r','drag.p/r','panel.p/r','targetdrag.p/r']
targetplotnames = ['Fixed','ArrowRing','CircleRing','TargetAnchor','TargetRing']
targetplotcolors = ['#ffe500','#ff9405','#ff4791','#007bff','#00c36b']
targetplotcolorslight = ['#ffe486','#ffb757','#ff8dbb','#64afff','#00fd8b']
interfaceIDs = ['arrow-click','drag-click','panel-click','target-click','targetdrag-click','arrow-press/release','drag-press/release','panel-press/release','targetdrag-press/release']
def plot_everything():
# interfaceIDs = ['arrow.click','drag.click','panel.click','target.click','targetdrag.click','arrow.press/release','drag.press/release','panel.press/release','targetdrag.press/release']
cycles_df = pd.read_csv("data/se2-10-29-filtered-cycles.csv", skiprows = 0)
print(cycles_df.columns)
# print(cycles_df.head())
uids = cycles_df["uid"].unique()
user_dfs = {}
user_data_columns = ["uid", "interfaceID", "numClicks", "draggingDuration", 'cycleLength']
user_data = []
for uid in uids:
user_df = cycles_df[cycles_df["uid"] == uid]
interface_id = user_df["interfaceID"].unique()[0]
user_data.append([uid, interface_id, np.mean(user_df['numClicks']), np.mean(user_df['draggingDuration']), np.mean(user_df['cycleLength'])]);
user_dfs = pd.DataFrame(user_data, columns=user_data_columns)
interface_dfs = {}
for interfaceID in interfaceIDs:
interface_dfs[interfaceID] = user_dfs[user_dfs["interfaceID"] == interfaceID]
all_interface_dfs = {}
for interfaceID in interfaceIDs:
all_interface_dfs[interfaceID] = cycles_df[cycles_df["interfaceID"] == interfaceID]
# %%
for interfaceID in interface_dfs:
interface_df = interface_dfs[interfaceID]
print(interfaceID)
print("Mean:", np.mean(interface_df['cycleLength']))
print("Standard Deviation:",np.std(interface_df['cycleLength']))
print("Min:",np.min(interface_df['cycleLength']))
print("Max:",np.max(interface_df['cycleLength']))
print()
# %% [markdown]
## Time stats per interface
plot_box_chart(interface_dfs, 'cycleLength', 'Task completion time (sec)')
plot_box_chart(interface_dfs, 'numClicks', 'Number of clicks', has_labels=False)
plot_box_chart(interface_dfs, 'draggingDuration', 'Drag duration (sec)')
plot_scatter(all_interface_dfs)
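# Usage sketch (assumes the hard-coded CSV path above exists relative to the working
# directory; no other configuration is needed):
# plot_everything()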
def plot_bar_chart(interface_dfs, label, y_label, has_labels=True):
# %%
if has_labels:
fig = plt.figure(figsize=(10,5))
else:
fig = plt.figure(figsize=(8,5))
fig.subplots_adjust(hspace=0.6, wspace=0.3)
ax = fig.add_subplot(1,1,1)
means_clicks = []
means_prs = []
errors_prs = []
errors_clicks = []
for i in np.arange(len(targetbarclicks)):
interface_dfclicks = interface_dfs[targetbarclicks[i]]
interface_dfprs = interface_dfs[targetbarprs[i]]
means_clicks.append(np.mean(interface_dfclicks[label]))
means_prs.append(np.mean(interface_dfprs[label]))
errors_clicks.append(np.std(interface_dfclicks[label]))
errors_prs.append(np.std(interface_dfprs[label]))
#print("Mean:", np.mean(interface_dfclicks['cycleLength']))
#print("Standard Deviation:",np.std(interface_dfclicks['cycleLength']))
#print("Min:",np.min(interface_dfclicks['cycleLength']))
#print("Max:",np.max(interface_dfclicks['cycleLength']))
#print()
means_prs[4] = 0
errors_prs[4] = 0
ax.grid(color='gray', linestyle='-.', linewidth=1, axis='x', which='major', zorder=0)
y_pos = np.arange(len(targetplotnames))
width = 0.44
rects1 = ax.barh(y_pos - width/2, means_prs, width-0.02, xerr=errors_prs,
alpha=1.0, color=targetplotcolorslight, ecolor="gray", capsize=9, zorder=2)
rects2 = ax.barh(y_pos + width/2, means_clicks, width-0.02, xerr=errors_clicks,
alpha=1.0, color=targetplotcolors, ecolor="gray", capsize=9, zorder=2)
# ax.set_ylabel('Interface',fontsize=24)
ax.set_xlabel(y_label,fontsize=24, fontstyle='italic')
ax.set_yticks(y_pos)
if has_labels:
ax.set_yticklabels(targetplotnames)
else:
ax.set_yticklabels(['','','','',''])
#ax.set_title('Task Completion Time', fontsize=24, fontweight='bold')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_linewidth(2.0)
ax.spines['left'].set_linewidth(2.0)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(axis='both', which='major', labelsize=24)
ax.set_xlim([0, 25])
#plt.xticks(rotation=45)
ax.invert_yaxis()
# for ytick, color in zip(ax.get_yticklabels(), targetplotcolors):
# ytick.set_color(color)
for rect1 in rects1[0:4]:
ax.text(0.1, rect1.get_y() + 0.36, '$\it{P/R}$', c="white", fontsize=20, fontfamily="Times New Roman")
for rect2 in rects2:
ax.text(0.1, rect2.get_y() + 0.36, '$\it{Click}$', c="white", fontsize=20, fontfamily="Times New Roman")
plt.tight_layout()
plt.savefig('data/' + label + '.pdf')
# plt.show()
def plot_scatter(interface_dfs):
# %% [markdown]
## Time vs. Distance (Euclidean, Orientation, and Combined)
### Euclidean Distance vs Time
# %%
fig = plt.figure(figsize=(16,6))
fig.subplots_adjust(hspace=0.6, wspace=0.3)
targetplots = ['panel-click','arrow-click','drag-click','targetdrag-click','target-click']
interface_dftargets = [interface_dfs[idx] for idx in targetplots]
for i, interface_df in enumerate(interface_dftargets):
        ax = fig.add_subplot(2, 5, i + 1)
        bx = fig.add_subplot(2, 5, i + 6)
ax.set_title(targetplotnames[i], fontsize=24)
#bx.set_title(targetplotnames[i], c=targetplotcolors[i], fontsize=16)
ax.scatter(interface_df['targetDistance'], interface_df['cycleLength'], c=targetplotcolors[i], marker=".")
bx.scatter(interface_df['threshXY'], interface_df['cycleLength'], c=targetplotcolors[i], marker=".")
lineax = fit_line(interface_df['targetDistance'], interface_df['cycleLength'])
linebx = fit_line(interface_df['threshXY'], interface_df['cycleLength'])
r_squaredax = lineax[2]
r_squaredbx = linebx[2]
ax.plot(lineax[0], lineax[1], c="black", linewidth=2.0)
bx.plot(linebx[0], linebx[1], c="black", linewidth=2.0)
# ax.text(80, 30, '$\mathbf{R^2}$ = %0.2f' %(1-r_squaredax), c="black", fontsize=20)
# bx.text(10, 30, '$\mathbf{R^2}$ = %0.2f' %(1-r_squaredbx), c="black", fontsize=20)
#_, _, r_val, _, _ = scp.stats.linregress(interface_df['targetDistance'], interface_df['cycleLength'])
#print(r_val**2)
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
bx.spines['right'].set_visible(False)
bx.spines['top'].set_visible(False)
for axis in ['bottom', 'left']:
ax.spines[axis].set_linewidth(3.0)
bx.spines[axis].set_linewidth(3.0)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
bx.yaxis.set_ticks_position('left')
bx.xaxis.set_ticks_position('bottom')
# Change the fontsize of tick labels
ax.tick_params(axis='both', which='major', labelsize=20)
bx.tick_params(axis='both', which='major', labelsize=20)
ax.set_xlabel('Target distance', fontsize=24, fontstyle='italic')
ax.set_ylim([0, 30])
bx.set_xlabel('Target size', fontsize=24, fontstyle='italic')
bx.set_ylim([0, 30])
if i == 0:
ax.set_ylabel('Time (sec)', fontsize=24, fontstyle='italic')
bx.set_ylabel('Time (sec)', fontsize=24, fontstyle='italic')
plt.tight_layout()
plt.savefig('data/scatter.pdf')
# plt.show()
def plot_box_chart(interface_dfs, label, y_label, has_labels=True):
# %%
if has_labels:
fig = plt.figure(figsize=(10,5))
else:
fig = plt.figure(figsize=(8,5))
fig.subplots_adjust(hspace=0.6, wspace=0.3)
ax = fig.add_subplot(1,1,1)
interface_data_combined = []
for i in np.arange(len(targetbarclicks)):
interface_dfclicks = interface_dfs[targetbarclicks[i]]
interface_dfprs = interface_dfs[targetbarprs[i]]
interface_data_combined.append(interface_dfprs[label])
interface_data_combined.append(interface_dfclicks[label])
interface_data_combined[8] = 0
# Combine both color lists every alternating items: https://stackoverflow.com/a/3678938
targetplotcolorscombined = [None]*(len(targetplotcolorslight)*2)
targetplotcolorscombined[::2] = targetplotcolorslight
targetplotcolorscombined[1::2] = targetplotcolors
flierprops = {'marker':'.', 'markerfacecolor':'none', 'markersize':10,
'linestyle':'none', 'markeredgecolor':'gray'}
width = 0.6
positions = np.arange(len(targetplotcolorscombined)) + np.array([(1-width)/2,0]*len(targetplotcolors)) + np.array([0, -(1-width)/2]*len(targetplotcolors)) # In order to group the boxes, we need to shift the top and bottom box of each pair up/down by half of the spacing (1-width)/2
bplot = ax.boxplot(interface_data_combined, 0, '.', 0, patch_artist=True, widths=width, positions=positions, flierprops=flierprops) # Setting patch_artist = True is requried to set the background color of the boxes: https://stackoverflow.com/a/28742262
for patch, color in zip(bplot['boxes'], targetplotcolorscombined):
patch.set(color="gray")
patch.set_facecolor(color)
for whisker in bplot['whiskers']:
whisker.set(color="gray")
for cap in bplot['caps']:
cap.set(color ='gray')
for median in bplot['medians']:
median.set(color='white')
y_pos = np.arange(len(targetplotnames))*2 - 0.3
ax.grid(color='gray', linestyle='-.', linewidth=1, axis='x', which='major', zorder=0)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_linewidth(2.0)
ax.spines['left'].set_linewidth(2.0)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.tick_params(axis='both', which='major', labelsize=24)
ax.set_xlim([0, 25])
#plt.xticks(rotation=45)
# ax.set_ylabel('Interface',fontsize=24)
ax.set_xlabel(y_label,fontsize=24, fontstyle='italic')
ax.set_yticks(y_pos)
if has_labels:
ax.set_yticklabels(targetplotnames)
else:
ax.set_yticklabels(['','','','',''])
#ax.set_title('Task Completion Time', fontsize=24, fontweight='bold')
ax.invert_yaxis()
ax.set_aspect(1.4)
# for ytick, color in zip(ax.get_yticklabels(), targetplotcolors):
# ytick.set_color(color)
# for rect1 in rects1[0:4]:
# ax.text(0.1, rect1.get_y() + 0.32, '$\it{P/R}$', c="white", fontsize=22, fontfamily="Times New Roman")
# for rect2 in rects2:
# ax.text(0.1, rect2.get_y() + 0.32, '$\it{Click}$', c="white", fontsize=22, fontfamily="Times New Roman")
for i in range(len(positions)):
ax.text(-0.1, positions[i] + 0.3, '$\it{' + ('P/R' if (i%2 == 0) else 'Click') + '}$', c="black", fontsize=15, fontfamily="Times New Roman", zorder = 0, ha='right')
plt.tight_layout()
plt.savefig('data/' + label + '.pdf')
# plt.show()
# %%
# Custom tools
def fit_line(x, y):
'''
Fits a line to an input set of points
Returns a tuple of the x and y components of the line
Adapted from: https://stackoverflow.com/a/31800660/6454085
'''
#correlation_matrix = np.corrcoef(x,y)
#correlation_xy = correlation_matrix[0,1]
#r_squared = correlation_xy**2
y_hat = np.poly1d(np.polyfit(x, y, 1))(x)
y_bar =np.sum(y)/len(y)
ssres = np.sum((y_hat - y)**2)
    sstot = np.sum((y - y_bar)**2)
    # Return value inferred from the callers above, which index the result as
    # line[0] (x), line[1] (fitted y) and line[2] (unexplained variance, 1 - R^2).
    return x, y_hat, ssres / sstot
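# Illustrative, commented-out check of fit_line on synthetic data (values are made up
# and not part of the study data; kept as comments to avoid running on import):
# xs = np.arange(10)
# ys = 3.0 * xs + 1.0 + np.random.randn(10)
# x_fit, y_fit, one_minus_r2 = fit_line(xs, ys)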
"""Create artificial data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.stats as stats
def decaying_multi_normal(dimensions, size, alpha=1):
"""Create multinormal data with exponentially decaying principal components.
Creates a two-dimensional numpy array such that a PCA yields principal
components with exponentially decaying variance.
Args:
dimensions: How many dimensions should the data have?
size: How many samples should be drawn?
alpha: The exponential decay constant: how fast should the variance
of the principal components decay (default: 1)? Only non-negative
values are allowed.
Returns:
A two-dimensional numpy array with one sample per row and one dimension
per column.
Raises:
ValueError: alpha is negative.
"""
if alpha < 0:
raise ValueError("alpha must be non-negative.")
pc_variance = np.exp(-alpha*np.array(range(dimensions)))
rand_ortho = stats.ortho_group.rvs(dimensions)
rand_normal = np.random.normal(scale=pc_variance, size=(size, dimensions))
    rand_input = np.matmul(rand_normal, rand_ortho)
    return rand_input
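# Usage sketch (illustrative helper, not part of the original API): draws a sample and
# returns the singular values of the centered data, which should decay roughly
# geometrically when alpha > 0.
def _demo_decaying_multi_normal(dimensions=5, size=1000, alpha=1.0):
    samples = decaying_multi_normal(dimensions, size, alpha)
    centered = samples - samples.mean(axis=0)
    return np.linalg.svd(centered, compute_uv=False)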
import numpy as np
import pandas as pd
def interpolate_traj(trks, threshold, mark_interpolation=False, drop_len=1):
trks = trks[np.argsort(trks[:, 1])]
feat_dim = trks[:, 5:].shape[1]
traj_df = pd.DataFrame(data=trks[:, :5], columns=['frame', 'trkid', 'y', 'x', 'z'])
reixed_traj_df = traj_df.set_index('trkid')
full_traj_dfs = []
traj_start_ends = traj_df.groupby('trkid')['frame'].agg(['min', 'max'])
for ped_id, (traj_start, traj_end) in traj_start_ends.iterrows():
if ped_id != -1:
            full_traj_df = pd.DataFrame(data=np.arange(traj_start, traj_end + 1))
# -*- coding: utf-8 -*-
'''
This code calculates impacts of temperature changes induced by aerosols on GDP
apply the Dell et al. damage function
distribution of Dell et al. parameter was sampled (1000 times) based on the provided median and standard error
by <NAME> (<EMAIL>)
'''
from netCDF4 import Dataset
import pandas as pd
import numpy as np
import _env
import datetime
import xarray as xr
nens = _env.nens
datasets = _env.datasets
year = _env.year
syr = str(year)
gdp_year = year
sgdp_year = str(gdp_year)
par = 'TREFHT'
ds = 'ERA-Interim'
p_scen = 'No-Aerosol'
if_temp = _env.odir_root + '/sim_temperature/Simulated_Global_and_Country_' + par + '_20yravg.nc'
if_ctry_list = _env.idir_root + '/regioncode/Country_List.xls'
if_ctry_pr = _env.idir_root + '/historical_stat/Ctry_Poor_Rich_from_Burke.csv' #adopt country list from Burke et al. 2018
if_ctry_gdpcap = _env.idir_root + '/historical_stat/' + '/API_NY.GDP.PCAP.KD_DS2_en_csv_v2.csv'
if_ctry_pop = _env.idir_root + '/historical_stat/' + '/API_SP.POP.TOTL_DS2_en_csv_v2.csv'
odir_gdp = _env.odir_root + '/gdp_' + ds + '/'
_env.mkdirs(odir_gdp)
#climatological temperature from three datasets
if_clim_temp = _env.odir_root + 'sim_temperature/Climatological_Temp_Ctry_3ds.csv'
itbl_clim_temp = pd.read_csv(if_clim_temp,index_col = 0)[['iso',ds]]
#country list
itbl_ctry_info = pd.read_csv(_env.odir_root + '/basic_stats/' + 'Country_Basic_Stats.csv')
#read global and country-level temperature
T_glob = Dataset(if_temp)['TREFHT_Global'][:,[0,1]]
T_ctry_full = Dataset(if_temp)['TREFHT_Country'][:,:,[0,1]]
#extract temperature for analyzed countries
T_ctry = T_ctry_full[((itbl_ctry_info['ind_in_full_list'].astype(int)).tolist()),:,:]
T_diff = T_ctry[:,:,1]-T_ctry[:,:,0]
T_ctry[:,:,0] = np.repeat(np.array(itbl_clim_temp[ds].values)[:,np.newaxis],8,axis=1)
T_ctry[:,:,1] = T_ctry[:,:,0] + T_diff
####country-level changes in GDP/cap growth rate####
########
# the net effect of a 1◦ C rise in temperature is to decrease growth rates in poor countries by −1.394 percentage points. (Dell,Jones, and Olken, 2012) Table 2
#median = -1.394
#standard error=0.408
if_gen_pars = 0
n_boot_sample = 1000
def cal_theta(theta,se_theta):
return np.random.normal(loc=theta,scale=se_theta,size=n_boot_sample)
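# Example of the sampling step (using the published point estimate and standard error
# quoted above): cal_theta(-1.394, 0.408) draws 1000 values of the growth-rate effect
# in percentage points per degree of warming; dividing by 100 converts to fractions.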
if if_gen_pars:
#generate 1000 sets of parameters for the selected damage function
djo_pars = cal_theta(-1.394,0.408)/100
_env.mkdirs(_env.idir_root + '/Dell_parameters/')
xr.Dataset({'djo_pars' : xr.DataArray(djo_pars,dims = ['boots'])}).to_netcdf(_env.idir_root + '/Dell_parameters/' + '/DJO_parameters.nc')
else:
djo_pars = xr.open_dataset(_env.idir_root + '/Dell_parameters/' + '/DJO_parameters.nc')['djo_pars'].values
n_ctry = len(itbl_ctry_info.index)
ifs_rich = 1-itbl_ctry_info['poor']
poor_ind = np.where(ifs_rich == 0)[0]
diff_gr = np.zeros([n_boot_sample, np.shape(T_ctry)[0],np.shape(T_ctry)[1]])
diff_gr[:,poor_ind,:] = np.einsum('i,jk->ijk',djo_pars, np.squeeze(T_ctry[poor_ind,:,1]-T_ctry[poor_ind,:,0])) #*(0.2609434-1.655145)/100 #no-aerosol minus with-aerosol
diff_gdp = np.einsum('ijk,j->ijk',diff_gr,itbl_ctry_info[str(gdp_year) + '_gdp'])
_env.rmfile(odir_gdp + 'GDP_Changes_' + 'Dell_' + str(gdp_year) + '_' + ds + '_' + p_scen + '.nc')
onc = Dataset(odir_gdp + 'GDP_Changes_' + 'Dell_' + str(gdp_year) + '_' + ds + '_' + p_scen + '.nc', 'w', format='NETCDF4')
d_ctry = onc.createDimension('boots',n_boot_sample)
d_ctry = onc.createDimension('countries',n_ctry)
d_ens = onc.createDimension('ensembles',nens)
v_ratio = onc.createVariable('GDP_Ratio','f4',('boots','countries','ensembles'))
v_ratio.desc = 'Impacts of aerosol-induced cooling on annual GDP growth rate'
v_ratio[:] = diff_gr
v_gdp = onc.createVariable('GDP','f4',('boots','countries','ensembles'))
v_gdp.desc = 'Impacts of aerosol-induced cooling on country-level annual GDP'
v_gdp[:] = diff_gdp
#write global attribute
onc.by = '<NAME> (<EMAIL>)'
onc.desc = 'Impacts of aerosol-induced cooling on annual GDP and GDP growth rate (based on the damage function of Dell, Jones, and Olken, 2012)'
onc.creattime = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
onc.close()
####summarize global and regional GDP changes####
itbl_gdp_baseline = itbl_ctry_info.copy()
odir_summary = _env.odir_root + 'summary_' + ds
_env.mkdirs(odir_summary)
writer = pd.ExcelWriter(odir_summary + '/country_specific_statistics_GDP_'+ds+'_'+p_scen+'_Dell.xls')
otbls_ctry_GDP_stat = {}
gdp_tot = itbl_gdp_baseline[sgdp_year + '_gdp'].sum()
spe = 'Dell'
otbl_median = pd.DataFrame(index=[spe],columns = ['median','median_ratio','5','5_ratio','95','95_ratio','10','10_ratio','90','90_ratio','prob_benefit'])
imtrx_gdp = diff_gdp.copy()
##global total
imtrx_gdp_glob = (imtrx_gdp).sum(axis=1)
otbl_median.loc[spe] = np.median(imtrx_gdp_glob)/1e9,np.median(imtrx_gdp_glob)/gdp_tot*100,np.percentile(imtrx_gdp_glob,95)/1e9,np.percentile(imtrx_gdp_glob,95)/gdp_tot*100,np.percentile(imtrx_gdp_glob,5)/1e9,np.percentile(imtrx_gdp_glob,5)/gdp_tot*100, np.percentile(imtrx_gdp_glob,90)/1e9,np.percentile(imtrx_gdp_glob,90)/gdp_tot*100,np.percentile(imtrx_gdp_glob,10)/1e9,np.percentile(imtrx_gdp_glob,10)/gdp_tot*100,len(np.where(imtrx_gdp_glob<0)[0])/np.size(imtrx_gdp_glob)
otbl_ctry_GDP_stat = itbl_gdp_baseline.copy()
otbl_ctry_GDP_stat['GDP_mean_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_median_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_mean_benefit_ratio'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_median_benefit_ratio'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_90_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_10_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_95_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['GDP_5_benefit'] = np.zeros(len(otbl_ctry_GDP_stat.index))
otbl_ctry_GDP_stat['probability_damage'] = np.zeros(len(otbl_ctry_GDP_stat.index)) #add by yz 20190719
for ictry,ctry in enumerate(itbl_ctry_info.index):
imtrx_country = (imtrx_gdp)[:,ictry,:]
otbl_ctry_GDP_stat.loc[ctry,'GDP_mean_benefit'] = -np.mean(imtrx_country)
otbl_ctry_GDP_stat.loc[ctry,'GDP_median_benefit'] = -np.median(imtrx_country)
otbl_ctry_GDP_stat.loc[ctry,'GDP_90_benefit'] = -np.percentile(imtrx_country,90)
otbl_ctry_GDP_stat.loc[ctry,'GDP_10_benefit'] = -np.percentile(imtrx_country,10)
otbl_ctry_GDP_stat.loc[ctry,'GDP_95_benefit'] = -np.percentile(imtrx_country,95)
otbl_ctry_GDP_stat.loc[ctry,'GDP_5_benefit'] = -np.percentile(imtrx_country,5)
otbl_ctry_GDP_stat.loc[ctry,'probability_damage'] = len(imtrx_country[imtrx_country>0])/np.size(imtrx_country)
otbl_ctry_GDP_stat['GDP_mean_benefit_ratio'] = otbl_ctry_GDP_stat['GDP_mean_benefit']/otbl_ctry_GDP_stat[sgdp_year+'_gdp']*100
otbl_ctry_GDP_stat['GDP_median_benefit_ratio'] = otbl_ctry_GDP_stat['GDP_median_benefit']/otbl_ctry_GDP_stat[sgdp_year+'_gdp']*100
otbl_ctry_GDP_stat.to_excel(writer,spe)
otbls_ctry_GDP_stat[spe] = otbl_ctry_GDP_stat.copy()
otbl_median = -otbl_median
otbl_median.to_excel(writer,'median_summary')
writer.save()
#==================changes in 90:10 and 80:20 ratio (inequality)===========================
itbl_gdp_baseline.sort_values([sgdp_year + '_gdpcap'],inplace=True)
tot_pop = itbl_gdp_baseline[sgdp_year + '_pop'].sum()
itbl_gdp_baseline[sgdp_year + '_gdpsum'] = 0
itbl_gdp_baseline[sgdp_year + '_popsum'] = 0
for irow, row in enumerate(itbl_gdp_baseline.index):
if irow == 0:
itbl_gdp_baseline.loc[row,sgdp_year + '_gdpsum'] = itbl_gdp_baseline.loc[row,sgdp_year + '_gdp']
itbl_gdp_baseline.loc[row, sgdp_year + '_popsum'] = itbl_gdp_baseline.loc[row,sgdp_year + '_pop']
else:
itbl_gdp_baseline.loc[row,sgdp_year + '_gdpsum'] = itbl_gdp_baseline[sgdp_year + '_gdpsum'].iloc[irow-1] + itbl_gdp_baseline.loc[row,sgdp_year + '_gdp']
itbl_gdp_baseline.loc[row, sgdp_year + '_popsum'] = itbl_gdp_baseline[sgdp_year + '_popsum'].iloc[irow-1] + itbl_gdp_baseline.loc[row,sgdp_year + '_pop']
itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] = itbl_gdp_baseline[sgdp_year + '_popsum']/tot_pop
#deciles (<=10% and >=90%)
deciles = {}
ind10 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']<=0.1)[0]
deciles[10] = itbl_gdp_baseline.iloc[ind10].copy()
ind90 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']>=0.9)[0]
deciles[90] = itbl_gdp_baseline.iloc[ind90].copy()
#quintiles (<=20% and >=80%)
ind20 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']<=0.2)[0]
deciles[20] = itbl_gdp_baseline.iloc[ind20].copy()
ind80 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']>=0.8)[0]
deciles[80] = itbl_gdp_baseline.iloc[ind80].copy()
writer = pd.ExcelWriter(odir_summary + '/Deciles_and_Quintile_ratio_changes_'+ds+'_'+p_scen+'_Dell.xls')
otbls = {}
otbl_ineq = pd.DataFrame(index=[spe],columns = ['median_ratio','5_ratio','95_ratio','10_ratio','90_ratio','probability_reduced'])
otbls['deciles'] = otbl_ineq.copy()
otbls['quintiles'] = otbl_ineq.copy()
omtrx_gdp_spe = diff_gdp.copy()
dec_var = {}
dec_base = {}
for perc in [10,20,80,90]:
dec = deciles[perc].copy()
dec_pop_tot = dec[sgdp_year + '_pop'].sum()
dec_gdp_tot = dec[sgdp_year + '_gdp'].sum()
dec_base[perc] = dec_gdp_tot/dec_pop_tot
ind_ctry = dec.index
imtrx_dec = omtrx_gdp_spe[:,ind_ctry,:]
imtrx_dec_sum = dec_gdp_tot-(imtrx_dec).sum(axis=1) #+ dec_gdp_tot
dec_gdpcap = imtrx_dec_sum/dec_pop_tot
dec_var[perc] = dec_gdpcap.copy()
dec_diff = (dec_var[90]/dec_var[10]-dec_base[90]/dec_base[10])/(dec_base[90]/dec_base[10])*100
quin_diff = (dec_var[80]/dec_var[20] - dec_base[80]/dec_base[20])/(dec_base[80]/dec_base[20])*100
otbls['deciles'].loc[spe,'median_ratio'] = np.median(dec_diff)
otbls['deciles'].loc[spe,'5_ratio'] = np.percentile(dec_diff,5)
otbls['deciles'].loc[spe,'95_ratio'] = np.percentile(dec_diff,95)
otbls['deciles'].loc[spe,'10_ratio'] = np.percentile(dec_diff,10)
otbls['deciles'].loc[spe,'90_ratio'] = np.percentile(dec_diff,90)
otbls['deciles'].loc[spe,'probability_reduced'] = len(dec_diff[dec_diff<0])/np.size(dec_diff)
otbls['quintiles'].loc[spe,'median_ratio'] = np.median(quin_diff)
otbls['quintiles'].loc[spe,'5_ratio'] = np.percentile(quin_diff,5)
otbls['quintiles'].loc[spe,'95_ratio'] = np.percentile(quin_diff,95)
otbls['quintiles'].loc[spe,'10_ratio'] = np.percentile(quin_diff,10)
otbls['quintiles'].loc[spe,'90_ratio'] = np.percentile(quin_diff,90)
otbls['quintiles'].loc[spe,'probability_reduced'] = len(quin_diff[quin_diff<0])/np.size(quin_diff)
""" A convenient plotting container
This package implements :class:`Plotter`, a simple container around a
dictionary-like structure (e.g. :class:`dict`, :class:`np.recarray`,
:class:`pandas.DataFrame`). It allows the user to plot directly using keys of
the data and also provides rapid group plotting routines (groupby and facets).
I was basically tired of all the packages doing fancy things and not allowing
basics or requiring a lot of dependencies.
Examples
--------
.. code-block:: python
>> d = {...}
>> p = plotter.Plotter(d)
>> g = p.groupby('BRK', markers='<^>v.oxs', colors='parula_r')
>> g.plot('CRA', 'CDEC')
>> g.colorbar().set_label('BRK')
Multiple groups can be done as well. (Caution, the `facet` option is not robust)
.. code-block:: python
>> g = p.groupby('BRK', facet=True, sharex=True, sharey=True).groupby('FLD')
>> g.plot('CRA', 'CDEC', 'o')
.. note::
* tested with python 2.7, & 3.4
* tested compatible with pandas (not required)
* requirements: numpy, matplotlib
:author: <NAME>
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
PY3 = sys.version_info[0] > 2
if PY3:
basestring = (str, bytes)
else:
basestring = (str, unicode)
import pylab as plt
import matplotlib as mpl
import numpy as np
import itertools
__all__ = ['Group', 'Plotter', 'create_common_cbar', 'colorify', 'evalexpr', 'create_common_legend']
def get_doc_from(name, obj=plt):
""" decorator to add documentation from a module (default: matplotlib)
Parameters
----------
name: str
name of the function to get the documentation from
obj: object
module from which the function is an attribute
Returns
-------
decorator: callable
decorator
"""
def deco(func):
fn = getattr(obj, name, None)
if fn is not None:
if func.__doc__ is None:
func.__doc__ = fn.__doc__
else:
func.__doc__ += fn.__doc__
return func
return deco
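# Illustration of how the decorator is meant to be used (purely a sketch; the wrapped
# method keeps its own docstring and gets matplotlib's plot docstring appended):
#
# @get_doc_from('plot')
# def plot(self, *args, **kwargs):
#     ...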
def _groupby(data, key):
""" create an iterator which returns (key, DataFrame) grouped by each
value of key(value) """
for k, index in _arg_groupby(data, key):
d = {a: b[index] for a,b in data.items()}
yield k, data.__class__(d)
def _arg_groupby(data, key):
""" create an iterator which returns (key, index) grouped by each
value of key(value) """
val = data[key]
ind = sorted(zip(val, range(len(val))), key=lambda x:x[0])
for k, grp in itertools.groupby(ind, lambda x: x[0]):
index = [k[1] for k in grp]
yield k, index
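# Minimal illustration of the grouping helpers above (the dict below is made up):
#
# data = {'grp': np.array(['a', 'a', 'b']), 'x': np.array([1.0, 2.0, 3.0])}
# for key, sub in _groupby(data, 'grp'):
#     print(key, sub['x'])   # 'a' -> [1. 2.], 'b' -> [3.]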
class Group(object):
""" Group multiple plotter instances into one container. This offers any function
of :class:`Plotter` through an implicit loop of any method It allows for
instance to generate multiple plots on the same axes or even facet plot
(one per group).
.. code-block:: python
>> g = Plotter(df).groupby('class')
>> g.set_options(facet=True, ncols=2, projection='aitoff')
# which is equivalent to
>> g = Plotter(df).groupby('class', facet=True, ncols=2, projection='aitoff')
>> g.plot('RA', 'Dec', 'o', alpha=0.5, mec='None')
Attributes
----------
seq: sequence
Sequence of Plotter instances
title: str
name of the group (used as label is nested groups)
facet: bool
set to use facets, i.e., one subplot per element of the group
markers: iterable
sequence of markers one per group
linestyles: iterable
sequence of linestyles one per group
colors: seq or Colormap
sequence of colors or Colormap instance from which deriving a
sequence of colors to encode each group
if Colormap instance, a cmap attribute will be generated after a
plot and will refer to the updated instance
sharex: bool
set to share x-axis with all subplots
sharey: bool
set to share y-axis with all subplots
kwargs: dict
any other option will be forwarded to :func:`plt.subplot`
.. see also::
:func:`set_options`
"""
def __init__(self, seq, title='', **kwargs):
self.seq = seq
self.title = title
self.facet = False
self.markers = None
self.linestyles = None
self.colors = None
self.ncols = 3
self.sharex = False
self.sharey = False
self.axes = None
self.kwargs = {}
self.create_common_cbar = create_common_cbar
self.set_options(**kwargs)
self.show = plt.show
def make_facets(self):
""" generates multiple subplots
uses self.ncols as number of columns
and subplots are also using self.kwargs.
Returns
-------
axes: sequence
sequence of the axes instance from the subplots
.. see also::
:func:`set_options`
"""
axes = []
n = len(self)
ncols = self.ncols
nlines = n // ncols
if ncols * nlines < n:
nlines += 1
if nlines == 0:
nlines = 1
ncols = n
axes = []
ax = sharex = sharey = None
for k in range(n):
if self.sharex:
sharex = ax
if self.sharey:
sharey = ax
ax = plt.subplot(nlines, ncols, k + 1, sharex=sharex,
sharey=sharey, **self.kwargs)
axes.append(ax)
if self.seq[k].label is not None:
ax.set_title(self.seq[k].label)
if (self.sharex):
if k < (n - ncols):
plt.setp(ax.get_xticklabels(), visible=False)
if (self.sharey):
if (k % ncols) > 0:
plt.setp(ax.get_yticklabels(), visible=False)
self.axes = axes
return axes
def set_options(self, **kwargs):
""" Set some options
Parameters
----------
title: str
rename the group
facet: bool
set the group to display facets or one plot
ncols: int
when facet is True, this gives how many columns should be used
markers: seq
sequence of markers (will cycle through)
linestyles: seq
sequence of linestyles (will cycle through)
colors: seq or Colormap
sequence of colors or Colormap instance from which deriving a
sequence of colors to encode each group
if Colormap instance, a cmap attribute will be generated after a
plot and will refer to the updated instance
sharex: bool
set to share x-axis with all subplots
sharey: bool
set to share y-axis with all subplots
kwargs: dict
any other option will be forwarded to :func:`plt.subplot`
Returns
-------
self: Group instance
returns itself for conveniance when writting one liners.
"""
title = kwargs.pop('title', None)
facet = kwargs.pop('facet', None)
ncols = kwargs.pop('ncols', None)
markers = kwargs.pop('markers', None)
colors = kwargs.pop('colors', None)
linestyles = kwargs.pop('linestyles', None)
labels = kwargs.pop('labels', None)
sharex = kwargs.pop('sharex', None)
sharey = kwargs.pop('sharey', None)
allow_expressions = kwargs.pop('allow_expressions', None)
if sharex is not None:
self.sharex = sharex
if sharey is not None:
self.sharey = sharey
if title is not None:
self.title = title
if facet is not None:
self.facet = facet
if ncols is not None:
self.ncols = ncols
if markers is not None:
self.markers = markers
if colors is not None:
self.colors = colors
            if isinstance(self.colors, basestring):
self.colors = plt.cm.get_cmap(self.colors)
if linestyles is not None:
self.linestyles = linestyles
if labels is not None:
for k, v in zip(self.seq, itertools.cycle(labels)):
k.label = v
if allow_expressions is not None:
for k in self.seq:
k.allow_expressions = allow_expressions
self.kwargs.update(kwargs)
return self
def groupby(self, key, select=None, labels=None, **kwargs):
""" Make individual plots per group
Parameters
----------
key: str
key on which building groups
select: sequence
explicit selection on the groups
if a group does not exist, it will be returned empty
labels: dict
set to replace the group names by a specific label string during
the plot
kwargs: dict
optional keywords forwarded to :func:`set_options` method
Returns
-------
g: Group instance
group of plotters
.. see also::
:func:`set_options`
"""
gg = []
for sk in self.seq:
lst = sk.groupby(key, select=select, labels=labels)
for k, v in sk.__dict__.items():
if k not in ['seq', 'title']:
setattr(lst, k, v)
if getattr(sk, 'title', None) is not None:
lst.label = sk.title
lst.set_options(**kwargs)
gg.append(lst)
return self.__class__(gg, title=self.title)
def subplot(self, *args, **kwargs):
""" A convenient shortcut for one liner use
Generates a subplot with given arguments and returns `self`.
"""
self.axes = plt.subplot(*args, **kwargs)
return self
def __len__(self):
return len(self.seq)
def __repr__(self):
txt = """Object Group {0:s} (length={2:d}): {1:s}"""
return txt.format(self.title, object.__repr__(self), len(self))
def __dir__(self):
""" show the content of Plotter """
return self.seq[0].__dir__()
def __getattr__(self, k):
""" Returns a looper function on each plotter of the group """
cyclenames = 'linestyles', 'colors', 'markers'
cyclekw = {k: getattr(self, k) for k in cyclenames}
if isinstance(self.colors, mpl.colors.Colormap):
s = set()
for sk in self.seq:
s = s.union(set(sk.data[self.title]))
colors, cmap = colorify(s)
cyclekw['colors'] = colors
self.cmap = cmap
if self.facet:
axes = self.make_facets()
return self.looper_facet_method(self.seq, k, axes, cyclekw=cyclekw)
else:
return self.looper_method(self.seq, k, cyclekw=cyclekw)
def __iter__(self):
""" Iterator over the individual plotter of the group """
for k in self.seq:
yield k
def __getitem__(self, k):
""" Returns one plotter of the group """
return self.seq[k]
@staticmethod
def looper_method(lst, methodname, cyclekw={}, **kw):
""" calls a method on many instance of sequence of objects
Parameters
----------
lst: sequence
sequence of objects to call the method from
methodname: str
name of the method to call from each object
cyclekw: dict
keyword arguments that calls need to cycle over per object.
Each element in this dictionary is expected to be a sequence and one
element of each will be used per call. It will use
:func:`itertools.cycle`. (None elements are filtered)
cyclenames = 'linestyles', 'colors', 'markers'
kw: dict
other keywords (have priority on `cyclekw`)
Returns
-------
deco: callable
mapper function
"""
cyclenames = 'linestyles', 'colors', 'markers'
_cyclekw = {k: itertools.cycle(cyclekw[k])
for k in cyclenames if cyclekw[k] is not None }
def next_cyclekw():
a = {k[:-1]:next(v) for k, v in _cyclekw.items()}
return a
def deco(*args, **kwargs):
r = []
for l in lst:
k0 = next_cyclekw()
kw.update(k0)
kw.update(kwargs)
if (l.data is None) or (np.size(l.data) == 0):
a = None
else:
a = getattr(l, methodname)(*args, **kw)
r.append(a)
return r
return deco
@staticmethod
def looper_facet_method(lst, methodname, axes, cyclekw={}, **kw):
"""
        calls a method on each object of a sequence of objects, forcing ``ax`` as a
        keyword argument (one axes instance per call). Objects that have no data to
        plot are skipped.
Parameters
----------
lst: sequence
sequence of objects to call the method from
methodname: str
name of the method to call from each object
axes: sequence
list of axes, one per call
cyclekw: dict
keyword arguments that calls need to cycle over per object.
Each element in this dictionary is expected to be a sequence and one
element of each will be used per call. It will use
:func:`itertools.cycle`. (None elements are filtered)
cyclenames = 'linestyles', 'colors', 'markers'
kw: dict
other keywords (have priority on `cyclekw`)
Returns
-------
deco: callable
mapper function
"""
cyclenames = 'linestyles', 'colors', 'markers'
_cyclekw = {k: itertools.cycle(cyclekw[k])
for k in cyclenames if cyclekw[k] is not None }
def next_cyclekw():
a = {k[:-1]:next(v) for k, v in _cyclekw.items()}
return a
def deco(*args, **kwargs):
r = []
for l, ax in zip(lst, axes):
k0 = next_cyclekw()
kw.update(k0)
kw.update(kwargs)
                if (l.data is None) or (np.size(l.data) == 0):
import sys
import argparse
from functools import reduce
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import Ridge, LinearRegression
import torch
import torch.nn as nn
from zamlexplain.data import load_data
from model import RealNVP
def mean_sd(df_x, df_gen):
df_x = df_x.iloc[:,:17]
df_gen = df_gen.iloc[:,:17]
mean_x = df_x.mean()
mean_gen = df_gen.mean()
mean_err = 100*(mean_gen - mean_x)/mean_x
df_mean = pd.DataFrame(OrderedDict({
'data mean': mean_x,
'synth mean': mean_gen,
'err %': mean_err})).round({'data mean': 2, 'synth mean': 2, 'err %': 0})
std_x = df_x.std()
std_gen = df_gen.std()
std_err = 100*(std_gen - std_x)/std_x
df_std = pd.DataFrame(OrderedDict({
'data std': std_x,
'synth std': std_gen,
'err %': std_err})).round({'data std': 2, 'synth std': 2, 'err %': 0})
return df_mean, df_std
def fix_df(x, scaler, return_numpy=False):
x = scaler.inverse_transform(x.copy())
for cat_idx in scaler.cat_cols:
if len(cat_idx) == 1:
x[:, cat_idx] = (x[:,cat_idx] > 0.5).astype(np.float32)
else:
new_ohe = np.zeros((x.shape[0], len(cat_idx)), dtype=np.float32)
new_ohe[np.arange(x.shape[0]), np.argmax(x[:, cat_idx], axis=1)] = 1.0
x[:, cat_idx] = new_ohe
# delinq_2yrs, inq, mths, mths, open
for i in [5, 6, 7, 8, 9, 10, 12, 16]:
x[x[:,i] < 0, i] = 0.0
x[:, i] = np.round(x[:, i])
if return_numpy:
return x
else:
return pd.DataFrame(x, columns=scaler.columns)
def un_ohe(df, scaler):
df = df.copy()
cat_cols = [cat_idx for cat_idx in scaler.cat_cols if len(cat_idx) > 1]
for cat_idx in cat_cols:
pref, suffs = get_pref(scaler.columns[cat_idx])
suffs = np.array(suffs)
df[pref] = suffs[np.argmax(df.iloc[:, cat_idx].as_matrix(), axis=1)]
cat_arr = np.array(reduce(lambda x,y: x+y, cat_cols))
return df.drop(labels=scaler.columns[cat_arr], axis=1)
def drop_static(df):
df = df.copy()
to_drop = []
for i in range(df.shape[1]):
if len(df.iloc[:,i].unique()) == 1:
to_drop += [i]
return df.drop(labels=df.columns[to_drop], axis=1)
def get_pref(lst):
if len(lst) == 1:
pref = lst[0]
suffs = ['']
else:
cnt = 0
while all([lst[0][cnt] == el[cnt] for el in lst]):
cnt += 1
pref = lst[0][:cnt]
suffs = [el[cnt:] for el in lst]
return pref.rstrip('_'), suffs
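# Example of the prefix splitting performed by get_pref, using one-hot column names
# that appear later in this script:
# get_pref(['term_36months', 'term_60months']) -> ('term', ['36months', '60months'])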
def categorical_hist(df_x, df_gen, scaler):
fig = plt.figure(1, figsize=(8, 8))
cnt = 0
for cat_idx in scaler.cat_cols:
n_var = len(cat_idx)
if n_var > 1:
x_vals = np.argmax(df_x.iloc[:, cat_idx].as_matrix(), axis=1)
gen_vals = np.argmax(df_gen.iloc[:, cat_idx].as_matrix(), axis=1)
x_hist = np.histogram(x_vals, np.arange(n_var+1))[0]
x_hist = x_hist/np.sum(x_hist)
gen_hist = np.histogram(gen_vals, np.arange(n_var+1))[0]
gen_hist = gen_hist/np.sum(gen_hist)
pref, suffs = get_pref(scaler.columns[cat_idx])
plt.subplot(2, 2, cnt+1)
cnt += 1
obj1 = plt.bar(np.arange(n_var)-0.1, x_hist, width=0.15, color='b', align='center')
obj2 = plt.bar(np.arange(n_var)+0.1, gen_hist, width=0.15, color='r', align='center')
plt.xticks(np.arange(n_var), suffs, rotation=30, ha='right')
plt.title(pref.rstrip('_'))
plt.subplots_adjust(hspace=0.4)
fig.legend([obj1, obj2], ['real', 'synth'], loc='upper center')
def payment_error(df):
def payment(p, r, n):
r /= 12
return p*(r*(1+r)**n)/((1+r)**n - 1)
term = np.array([36 if t36 >= t60 else 60 for t60, t36 in zip(df['term_60months'], df['term_36months'])])
calc = payment(df['loan_amnt'], df['int_rate'], term)
df_payment = pd.DataFrame({'Synth installment': df['installment'], 'Calc installment': calc})
df_payment.to_csv('installment.csv', index=False)
error = 100* (calc - df['installment'])/calc
fig = plt.figure(2)
error.plot.hist(ax=fig.gca(), title='% error in payment calculation', range=[-100, 100], bins=50)
plt.xlabel('%')
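# Sanity check of the annuity formula used above (illustrative numbers only): a 10,000
# loan at 10% annual interest over 36 months gives
# 10000 * ((0.1/12) * (1 + 0.1/12)**36) / ((1 + 0.1/12)**36 - 1) ≈ 322.67 per month.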
def quality_test(df_x, df_gen, scaler):
# check means vs. sd
df_mean, df_sd = mean_sd(df_x, df_gen)
print(df_mean)
df_mean.to_csv('mean.csv')
print(df_sd)
df_sd.to_csv('std.csv')
categorical_hist(df_x, df_gen, scaler)
payment_error(df_gen)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', default='flow_model.pytorch', help='training RealNVP model')
parser.add_argument('--n_samples', default=10000, type=int, help='number of samples to use for reconstruction quality tests')
parser.add_argument('--quality', action='store_true', help='run reconstruction quality tests')
parser.add_argument('--sensitivity', action='store_true', help='run sensitivity demo')
parser.add_argument('--improvement', action='store_true', help='run score improvement demo')
args = parser.parse_args(sys.argv[1:])
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
x, y, scaler = load_data('lendingclub', is_tree=False, scaler_type='standardize')
x = np.concatenate([x, np.zeros((x.shape[0], 1))], axis=1).astype(np.float32)
flow = RealNVP(x.shape[1], device)
if device.type == 'cpu':
flow.load_state_dict(torch.load(args.model, map_location='cpu'))
else:
flow.load_state_dict(torch.load(args.model))
flow.to(device)
flow.eval()
# produce samples
x_gen = flow.g(flow.prior.sample((args.n_samples,))).detach().cpu().numpy()[:,:-1]
np.save('samples.npy', x_gen)
df_x = scaler.as_dataframe(x[:,:-1])
df_gen = fix_df(x_gen, scaler)
df_gen.to_csv('real.csv')
# reconstruction quality ---------------------
if args.quality:
quality_test(df_x, df_gen, scaler)
# build a model
param = {'max_depth': 4, 'silent': 1, 'objective': 'binary:logistic'}
num_round = 20
num_train = 35000
bst = xgb.train(param, xgb.DMatrix(x[:num_train], label=y[:num_train]), num_round)
pred_val = bst.predict(xgb.DMatrix(x[num_train:]))
val_auc = roc_auc_score(y[num_train:], pred_val)
print('\nAUC score on val set: {:.03f}'.format(val_auc))
pred_fn = lambda x: bst.predict(xgb.DMatrix(x))
shap_fn = lambda x: bst.predict(xgb.DMatrix(x), pred_contribs=True)
inf_fn = lambda x: flow.f(torch.from_numpy(x.astype(np.float32)).to(device))[0].detach().cpu().numpy()
gen_fn = lambda z: flow.g(torch.from_numpy(z.astype(np.float32)).to(device)).detach().cpu().numpy()
logp_fn = lambda x: flow.log_prob(torch.from_numpy(x.astype(np.float32)).to(device)).detach().cpu().numpy()
if args.sensitivity:
noise_sd = 0.1
n_nbhrs = 40
i = np.random.randint(num_train, x.shape[0], 1)[0]
print('\nSensitivity for sample {:d}'.format(i))
x_test = x[i][None,:]
z_test = inf_fn(x_test)
z_nbhr = z_test + noise_sd * np.random.randn(n_nbhrs, z_test.shape[1]).astype(np.float32)
x_nbhr = gen_fn(z_nbhr)
def fixer(x, scaler):
"""Make fixed np array that is standardized"""
x_new = fix_df(x[:,:-1], scaler, return_numpy=True)
x_new = scaler.transform(x_new)
return np.concatenate([x_new, np.zeros((x_new.shape[0],1), dtype=np.float32)], axis=1)
x_nbhr = fixer(x_nbhr, scaler)
pred_nbhr = pred_fn(x_nbhr)
pred_test = pred_fn(x_test)
shap_values = shap_fn(x_test)[0][:-2]
best_idx_shap = np.argsort(-np.abs(shap_values))[:10]
        the_crew_np = np.concatenate([x_test, x_nbhr], axis=0)
from numpy.core.fromnumeric import transpose
import sortedcontainers
from pychastic.cached_gaussian import normal
import numpy as np
import jax.numpy as jnp
import math
class Wiener:
'''
    Class for sampling and memoization of a Wiener process.
'''
def __init__(self, seed=None):
self.sample_points = sortedcontainers.SortedDict()
self.sample_points[0.] = {
'w': 0.0,
#'zToPrevPoint': 0.0
}
self.normal_generator = normal(seed=seed)
self.t_max = 0
self.last_w = 0
#@profile
def get_w(self, t):
'''
        Get value of the Wiener process at a specified timestamp.
Parameters
----------
t: float
Time at which the process should be sampled. Has to be non-negative.
Returns
-------
float
Value of Wiener process at time ``t``.
Example
-------
>>> wiener = Wiener()
>>> dW = wiener.get_w(1.0) - wiener.get_w(0.0)
>>> dW
0.321 #random value from N(0,1)
'''
if not t >= 0:
raise ValueError('Illegal (negative?) timestamp')
if t in self.sample_points:
return self.sample_points[t]['w']
t_max = self.t_max
if t > t_max:
normal = next(self.normal_generator)
next_w = self.last_w + np.sqrt(t-t_max)*normal
self.sample_points[t] = {'w': next_w}
self.t_max = t
self.last_w = next_w
else:
next_i = self.sample_points.bisect_left(t)
next_t = self.sample_points.peekitem(next_i)[0]
prev_t = self.sample_points.peekitem(next_i-1)[0]
next_w = self.sample_points.peekitem(next_i)[1]['w']
prev_w = self.sample_points.peekitem(next_i-1)[1]['w']
w = prev_w + (t-prev_t)/(next_t-prev_t)*(next_w-prev_w) + next(self.normal_generator)*np.sqrt((next_t-t)*(t-prev_t)/(next_t-prev_t))
assert np.isfinite(w)
self.sample_points[t] = {'w': w}
return self.sample_points[t]['w']
def get_z(self, t1, t2):
raise NotImplementedError
class WienerWithZ:
'''
    Class for sampling and memoization of a Wiener process and the first nontrivial stochastic integral.
'''
def __init__(self,seed=None):
self.sample_points = sortedcontainers.SortedDict()
self.sample_points[0.] = {
'w': 0.0,
'zToPrevPoint': 0.0
}
self.normal_generator = normal(seed=seed)
def get_w(self,t):
'''
        Get value of the Wiener process at a specified timestamp.
Parameters
----------
t: float
Time at which the process should be sampled. Has to be non-negative.
Returns
-------
float
Value of Wiener process at time ``t``.
Example
-------
>>> wiener = WienerWithZ()
>>> dW = wiener.get_w(1.0) - wiener.get_w(0.0)
>>> dW
0.321 #random value from N(0,1)
'''
self.ensure_sample_point(t)
return self.sample_points[t]['w']
def get_z(self,t1,t2):
'''
Get value of first nontrivial, primitive stochastic integral I(1,0),
(Kloden-Platen 10.4.2)
.. math :: Z_{t_1}^{t_2} = \int_{t_1}^{t_2} \int_{t_1}^{s_2} dW_{s_1} ds_2
Parameters
----------
t: float
Time at which the process should be sampled. Has to be non-negative.
Returns
-------
float
Value of Wiener process at time ``t``.
Example
-------
>>> wiener = WienerWithZ()
>>> dZ = wiener.get_z(0.0,0.1)
>>> dZ
0.321 #random value from N(0,1)
'''
if t1 >= t2:
raise ValueError
self.ensure_sample_point(t1)
self.ensure_sample_point(t2)
Z = 0
w1 = self.sample_points[t1]['w']
it_lower = self.sample_points.irange(t1, t2)
it_upper = self.sample_points.irange(t1, t2)
next(it_upper)
for t_upper in it_upper:
t_lower = next(it_lower)
Z += self.sample_points[t_upper]['zToPrevPoint']
dt = t_upper-t_lower
dw = self.sample_points[t_lower]['w'] - w1
Z += dw*dt
return Z
def ensure_sample_point(self,t):
'''
Ensures ``t`` is in the dictionary of sampled time instances. If not there yet samples new point
either to the right of all existing points or inbetween existing points.
'''
if t in self.sample_points.keys():
return
if t < 0:
raise ValueError
t_max = self.sample_points.keys()[-1]
if t > t_max:
#Kloden-Platen 10.4.3
# (4.3) dW = U1 sqrt(dt), dZ = 0.5 dt^(3/2) (U1 + 1/sqrt(3) U2)
tmpU1 = next(self.normal_generator)
tmpU2 = next(self.normal_generator)
tmpdt = t - t_max
tmpdW = tmpU1*math.sqrt(tmpdt)
tmpdZ = 0.5*math.pow(tmpdt,3.0/2.0)*(tmpU1 + (1.0 / math.sqrt(3))*tmpU2 )
self.sample_points[t] = {'w': self.sample_points[t_max]['w'] + tmpdW, 'zToPrevPoint': tmpdZ}
else:
#Somewhere inside sampled points
next_i = self.sample_points.bisect_left(t)
next_t = self.sample_points.peekitem(next_i)[0]
prev_t = self.sample_points.peekitem(next_i-1)[0]
next_w = self.sample_points.peekitem(next_i)[1]['w']
prev_w = self.sample_points.peekitem(next_i-1)[1]['w']
wt1t3 = next_w - prev_w
zt1t3 = self.sample_points.peekitem(next_i)[1]['zToPrevPoint']
(t1,t2,t3) = (prev_t,t,next_t)
i1 = t2-t1
i2 = t3-t2
I = t3-t1
varwt1t2 = i1*i2*(i1*i1-i1*i2+i2*i2)/(I*I*I);
cov = i1*i1*i2*i2*(i2-i1)/(2*I*I*I);
varzt1t2 = i1*i1*i1*i2*i2*i2/(3*I*I*I);
(wt1t2, zt1t2) = self._DrawCovaried(varwt1t2,cov,varzt1t2)
#Add conditional mean
wt1t2 += wt1t3*i1*(i1-2*i2)/(I*I) + zt1t3*6*i1*i2/(I*I*I)
zt1t2 += wt1t3*(-1)*i1*i1*i2/(I*I) + zt1t3*i1*i1*(i1+3*i2)/(I*I*I)
wt2 = prev_w + wt1t2
#Break Z integration interval into two segments
self.sample_points[next_t]['zToPrevPoint'] = (
self.sample_points.peekitem(next_i)[1]['zToPrevPoint']
- zt1t2
- (next_t-t)*(wt2-prev_w))
self.sample_points[t] = {'w' : wt2, 'zToPrevPoint' : zt1t2}
def _DrawCovaried(self,xx,xy,yy):
'''
Draws x,y from N(0,{{xx,xy},{xy,yy}}) distribution
'''
(x,y) = self._DrawCorrelated(xy/math.sqrt(xx*yy))
return (math.sqrt(xx)*x,math.sqrt(yy)*y)
def _DrawCorrelated(self,cor):
'''
Draws correalted normal samples with correlation ``cor``
'''
z1 = next(self.normal_generator)
z2 = next(self.normal_generator)
return (math.sqrt(1-cor*cor)*z1 + cor*z2,z2)
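# Usage sketch combining the two accessors of WienerWithZ (results are random draws;
# the seed argument mirrors the constructor signature above):
# w = WienerWithZ(seed=0)
# dW = w.get_w(1.0) - w.get_w(0.0)
# dZ = w.get_z(0.0, 1.0)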
class VectorWiener:
'''
    Class for sampling and memoization of a vector-valued Wiener process.
Parameters
----------
noiseterms : int
Dimensionality of the vector process (i.e. number of independent Wiener processes).
Example
-------
>>> vw = pychastic.wiener.VectorWiener(2)
>>> vw.get_w(1)
array([0.21,-0.31]) # random, independent from N(0,1)
'''
def __init__(self,noiseterms : int):
self.sample_points = sortedcontainers.SortedDict()
self.noiseterms = noiseterms
self.sample_points[0.] = {
'w': np.array([0.0 for x in range(0,noiseterms)]),
}
self.normal_generator = normal()
def get_w(self, t):
'''
        Get value of the Wiener process at a specified timestamp.
Parameters
----------
t: float
Time at which the process should be sampled. Has to be non-negative.
Returns
-------
np.array
Value of Wiener processes at time ``t``.
Example
-------
>>> vw = VectorWiener(2)
>>> dW = vw.get_w(1.0) - vw.get_w(0.0)
>>> dW
array([0.321,-0.123]) #random, each from N(0,1)
'''
if t < 0:
raise ValueError('Negative timestamp')
if t in self.sample_points:
return self.sample_points[t]['w']
(t_max, last_values) = self.sample_points.peekitem() # last item is default
if t > t_max:
nvec = self.normal_generator.get_number_of_samples(self.noiseterms)
#nvec = np.array([next(self.normal_generator) for x in range(0,self.noiseterms)]) # slow :<
w_val = np.array(last_values['w'] + np.sqrt(t-t_max)*nvec)
self.sample_points[t] = {'w': w_val}
else:
#print(f'Called with t {t}, current t_max is {t_max}')
#print(self.sample_points)
raise NotImplementedError
return self.sample_points[t]['w']
def get_commuting_noise(self, t1, t2):
'''
Get value of commutative noise matrix (compare Kloden-Platen (10.3.15))
Define :math:`I_{jk}` as
.. math :: I_{jk}(t_1,t_2) = \int_{t_1}^{t_2} \int_{t_1}^{s_1} dW_j(s_2) dW_k(s_1)
Then for :math:`j \\neq k`
.. math :: I_{jk} + I_{kj} = \Delta W_j \Delta W_k
Parameters
----------
t1 : float
Lower bound of double stochastic integrals
t2 : float
Upper bound of double stochastic integrals
Returns
-------
np.array
Symmetric square matrix `noiseterms` by `noiseterms` containing :math:`I_{jk}` approximants as components.
'''
if t1 < 0 or t2 < 0:
raise ValueError
if t1 > t2:
raise ValueError
if (t1 in self.sample_points) and (t2 in self.sample_points):
dW = self.sample_points[t2]['w'] - self.sample_points[t1]['w']
dV = self.sample_points[t2]['w'] - self.sample_points[t1]['w']
prod = np.outer(dW,dV)
#halfdiag = np.oneslike(prod) - 0.5*np.identity(self.noiseterms)
#return prod*halfdiag - (t2-t1)*np.identity(self.noiseterms)
return 0.5*prod
t_max = self.sample_points.keys()[-1]
if t1 > t_max:
nvec = self.normal_generator.get_sample(self.noiseterms,n=self.noiseterms)
# nvec = np.array([next(self.normal_generator) for x in range(0,self.noiseterms)]) # slow :<
self.sample_points[t1] = {'w': self.sample_points[t_max]['w'] + np.sqrt(t1-t_max)*nvec}
elif t1 not in self.sample_points:
raise NotImplementedError
if t2 > t_max:
nvec = self.normal_generator.get_sample(self.noiseterms,n=self.noiseterms)
# nvec = np.array([next(self.normal_generator) for x in range(0,self.noiseterms)]) # slow :<
self.sample_points[t2] = {'w': self.sample_points[t_max]['w'] + np.sqrt(t2-t_max)*nvec}
elif t2 not in self.sample_points:
raise NotImplementedError
dW = self.sample_points[t2]['w'] - self.sample_points[t1]['w']
dV = self.sample_points[t2]['w'] - self.sample_points[t1]['w']
prod = np.outer(dW,dV)
#halfdiag = np.oneslike(prod) - 0.5*np.identity(self.noiseterms)
#return prod*halfdiag - (t2-t1)*np.identity(self.noiseterms)
return 0.5*prod
def get_commuting_noise_component(self, t1, t2, j, k):
'''
Get value of commutative noise component (compare Kloden-Platen (10.3.15)).
Define :math:`I_{jk}` as
.. math :: I_{jk}(t_1,t_2) = \int_{t_1}^{t_2} \int_{t_1}^{s_1} dW_j(s_2) dW_k(s_1)
Then for :math:`j \\neq k`
.. math :: I_{jk} + I_{kj} = \Delta W_j \Delta W_k
Parameters
----------
t1 : float
Lower bound of double stochastic integrals
t2 : float
Upper bound of double stochastic integrals
j : int
Index of the first of Wiener processes
k : int
Index of the second of Wiener processes
Returns
-------
float
Value of stochastic integral with specified time bounds.
'''
if t1 < 0 or t2 < 0:
raise ValueError
if t1 > t2:
raise ValueError
if (t1 in self.sample_points) and (t2 in self.sample_points):
dW = self.sample_points[t2]['w'][j] - self.sample_points[t1]['w'][j]
dV = self.sample_points[t2]['w'][k] - self.sample_points[t1]['w'][k]
if j != k:
return 0.5*dW*dV
else:
return 0.5*(dW*dW - (t2-t1))
t_max = self.sample_points.keys()[-1]
if t1 > t_max:
#nvec = np.array([next(self.normal_generator) for x in range(0,self.noiseterms)])
nvec = self.normal_generator.get_sample(self.noiseterms,n=self.noiseterms)
            self.sample_points[t1] = {'w': self.sample_points[t_max]['w'] + np.sqrt(t1-t_max)*nvec}
import numpy as np
import conftest
from PathPlanning.DubinsPath import dubins_path_planning
np.random.seed(12345)
def check_edge_condition(px, py, pyaw, start_x, start_y, start_yaw, end_x,
end_y, end_yaw):
assert (abs(px[0] - start_x) <= 0.01)
assert (abs(py[0] - start_y) <= 0.01)
assert (abs(pyaw[0] - start_yaw) <= 0.01)
assert (abs(px[-1] - end_x) <= 0.01)
assert (abs(py[-1] - end_y) <= 0.01)
assert (abs(pyaw[-1] - end_yaw) <= 0.01)
def check_path_length(px, py, lengths):
path_len = sum(
[np.hypot(dx, dy) for (dx, dy) in zip(np.diff(px), np.diff(py))])
assert (abs(path_len - sum(lengths)) <= 0.1)
def test_1():
start_x = 1.0 # [m]
start_y = 1.0 # [m]
start_yaw = np.deg2rad(45.0) # [rad]
end_x = -3.0 # [m]
end_y = -3.0 # [m]
end_yaw = np.deg2rad(-45.0) # [rad]
curvature = 1.0
px, py, pyaw, mode, lengths = dubins_path_planning.dubins_path_planning(
start_x, start_y, start_yaw, end_x, end_y, end_yaw, curvature)
check_edge_condition(px, py, pyaw, start_x, start_y, start_yaw, end_x,
end_y, end_yaw)
check_path_length(px, py, lengths)
def test_2():
dubins_path_planning.show_animation = False
dubins_path_planning.main()
def test_3():
N_TEST = 10
for i in range(N_TEST):
start_x = (np.random.rand() - 0.5) * 10.0 # [m]
start_y = (np.random.rand() - 0.5) * 10.0 # [m]
start_yaw = np.deg2rad((np.random.rand() - 0.5) * 180.0) # [rad]
        end_x = (np.random.rand() - 0.5) * 10.0  # [m]
"""
faerun.py
====================================
The main module containing the Faerun class.
"""
import math
import os
import copy
from typing import Union, Dict, Any, List, Tuple
from collections.abc import Iterable
import colour
import jinja2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import Colormap
from pandas import DataFrame
try:
from IPython.display import display, IFrame, FileLink
except Exception:
pass
class Faerun(object):
"""Creates a faerun object which is an empty plotting surface where
layers such as scatter plots can be added."""
def __init__(
self,
title: str = "",
clear_color: str = "#111111",
coords: bool = True,
coords_color: str = "#888888",
coords_box: bool = False,
coords_ticks: bool = True,
coords_grid: bool = False,
coords_tick_count: int = 10,
coords_tick_length: float = 2.0,
coords_offset: float = 5.0,
x_title: str = "",
y_title: str = "",
show_legend: bool = True,
legend_title: str = "Legend",
legend_orientation: str = "vertical",
legend_number_format: str = "{:.2f}",
view: str = "free",
scale: float = 750.0,
alpha_blending=False,
anti_aliasing=True,
style: Dict[str, Dict[str, Any]] = {},
impress: str = None,
thumbnail_width: int = 250,
):
"""Constructor for Faerun.
Keyword Arguments:
title (:obj:`str`, optional): The plot title
clear_color (:obj:`str`, optional): The background color of the plot
coords (:obj:`bool`, optional): Show the coordinate axes in the plot
coords_color (:obj:`str`, optional): The color of the coordinate axes
coords_box (:obj:`bool`, optional): Show a box around the coordinate axes
            coords_ticks (:obj:`bool`, optional): Show ticks on coordinate axes
coords_grid (:obj:`bool`, optional): Extend ticks to create a grid
coords_tick_count (:obj:`int`, optional): The number of ticks to display per axis
coords_tick_length (:obj:`float`, optional): The length of the coordinate ticks
coords_offset (:obj:`float`, optional): An offset added to the coordinate axes
x_title (:obj:`str`, optional): The title of the x-axis
y_title (:obj:`str`, optional): The title of the y-axis
show_legend (:obj:`bool`, optional): Whether or not to show the legend
legend_title (:obj:`str`, optional): The legend title
legend_orientation (:obj:`str`, optional): The orientation of the legend ('vertical' or 'horizontal')
legend_number_format (:obj:`str`, optional): A format string applied to the numbers displayed in the legend
view (:obj:`str`, optional): The view (front, back, top, bottom, left, right, free)
scale (:obj:`float`, optional): To what size to scale the coordinates (which are normalized)
alpha_blending (:obj:`bool`, optional): Whether to activate alpha blending (required for smoothCircle shader)
anti_aliasing (:obj:`bool`, optional): Whether to activate anti-aliasing. Might improve quality at the cost of (substantial) rendering performance
style (:obj:`Dict[str, Dict[str, Any]]`, optional): The css styles to apply to the HTML elements
impress (:obj:`str`, optional): A short message that is shown on the HTML page
thumbnail_width (:obj: `int`, optional): The width of the thumbnail images. Defaults to 250.
"""
self.title = title
self.clear_color = clear_color
self.coords = coords
self.coords_color = coords_color
self.coords_box = coords_box
self.coords_ticks = coords_ticks
self.coords_grid = coords_grid
self.coords_tick_count = coords_tick_count
self.coords_tick_length = coords_tick_length
self.coords_offset = coords_offset
self.x_title = x_title
self.y_title = y_title
self.show_legend = show_legend
self.legend_title = legend_title
self.legend_orientation = legend_orientation
self.legend_number_format = legend_number_format
self.view = view
self.scale = scale
self.alpha_blending = alpha_blending
self.anti_aliasing = anti_aliasing
self.style = style
self.impress = impress
self.thumbnail_width = thumbnail_width
self.trees = {}
self.trees_data = {}
self.scatters = {}
self.scatters_data = {}
# Defining the default style (css values)
default_style = {
"legend": {
"bottom": "10px",
"right": "10px",
"padding": "10px",
"border": "1px solid #262626",
"border-radius": "2px",
"background-color": "#111111",
"filter": "drop-shadow(0px 0px 10px rgba(0, 0, 0, 0.5))",
"color": "#eeeeee",
"font-family": "'Open Sans'",
},
"selected": {
"bottom": "10px",
"left": "10px",
"padding": "0px",
"border": "1px solid #262626",
"border-radius": "2px",
"background-color": "#111111",
"filter": "drop-shadow(0px 0px 10px rgba(0, 0, 0, 0.5))",
"color": "#eeeeee",
"font-family": "'Open Sans'",
},
"controls": {
"top": "10px",
"right": "10px",
"padding": "2px",
"border": "1px solid #262626",
"border-radius": "2px",
"background-color": "#111111",
"filter": "drop-shadow(0px 0px 10px rgba(0, 0, 0, 0.5))",
"color": "#eeeeee",
"font-family": "'Open Sans'",
},
"title": {
"padding-bottom": "20px",
"font-size": "1.0em",
"color": "#888888",
"font-family": "'Open Sans'",
},
"x-axis": {
"padding-top": "20px",
"font-size": "0.7em",
"color": "#888888",
"font-family": "'Open Sans'",
},
"y-axis": {
"padding-bottom": "20px",
"font-size": "0.7em",
"color": "#888888",
"font-family": "'Open Sans'",
"transform": "rotate(-90deg)",
},
"color-box": {"width": "15px", "height": "15px", "border": "solid 0px"},
"color-stripe": {"width": "15px", "height": "1px", "border": "solid 0px"},
"color-stripe": {"width": "15px", "height": "1px", "border": "solid 0px"},
"crosshair": {"background-color": "#fff"},
}
for key, _ in default_style.items():
if key in self.style:
default_style[key].update(self.style[key])
self.style = default_style
def add_tree(
self,
name: str,
data: Union[dict, DataFrame],
mapping: dict = {
"from": "from",
"to": "to",
"x": "x",
"y": "y",
"z": "z",
"c": "c",
},
color: str = "#666666",
colormap: Union[str, Colormap] = "plasma",
fog_intensity: float = 0.0,
point_helper: str = None,
):
"""Add a tree layer to the plot.
Arguments:
name (:obj:`str`): The name of the layer
data (:obj:`dict` or :obj:`DataFrame`): A Python dict or Pandas DataFrame containing the data
Keyword Arguments:
mapping (:obj:`dict`, optional): The keys which contain the data in the input dict or DataFrame
color (:obj:`str`, optional): The default color of the tree
colormap (:obj:`str` or :obj:`Colormap`, optional): The name of the colormap (can also be a matplotlib Colormap object)
fog_intensity (:obj:`float`, optional): The intensity of the distance fog
point_helper (:obj:`str`, optional): The name of the scatter layer to associate with this tree layer (the source of the coordinates)
"""
if point_helper is None and mapping["z"] not in data:
data[mapping["z"]] = [0] * len(data[mapping["x"]])
self.trees[name] = {
"name": name,
"color": color,
"fog_intensity": fog_intensity,
"mapping": mapping,
"colormap": colormap,
"point_helper": point_helper,
}
self.trees_data[name] = data
def add_scatter(
self,
name: str,
data: Union[Dict, DataFrame],
mapping: Dict = {
"x": "x",
"y": "y",
"z": "z",
"c": "c",
"cs": "cs",
"s": "s",
"labels": "labels",
"knn": "knn",
},
colormap: Union[str, Colormap, List[str], List[Colormap]] = "plasma",
shader: str = "sphere",
point_scale: float = 1.0,
max_point_size: float = 100.0,
fog_intensity: float = 0.0,
saturation_limit: Union[float, List[float]] = 0.2,
categorical: Union[bool, List[bool]] = False,
interactive: bool = True,
has_legend: bool = False,
legend_title: Union[str, List[str]] = None,
legend_labels: Union[Dict, List[Dict]] = None,
min_legend_label: Union[str, float, List[str], List[float]] = None,
max_legend_label: Union[str, float, List[str], List[float]] = None,
series_title: Union[str, List[str]] = None,
ondblclick: Union[str, List[str]] = None,
selected_labels: Union[List, List[List]] = None,
label_index: Union[int, List[int]] = 0,
title_index: Union[int, List[int]] = 0,
knn: List[List[int]] = [],
):
"""Add a scatter layer to the plot.
Arguments:
name (:obj:`str`): The name of the layer
data (:obj:`dict` or :obj:`DataFrame`): A Python dict or Pandas DataFrame containing the data
Keyword Arguments:
mapping (:obj:`dict`, optional): The keys which contain the data in the input dict or the column names in the pandas :obj:`DataFrame`
colormap (:obj:`str`, :obj:`Colormap`, :obj:`List[str]`, or :obj:`List[Colormap]` optional): The name of the colormap (can also be a matplotlib Colormap object). A list when visualizing multiple series
shader (:obj:`str`, optional): The name of the shader to use for the data point visualization
point_scale (:obj:`float`, optional): The relative size of the data points
max_point_size (:obj:`int`, optional): The maximum size of the data points when zooming in
fog_intensity (:obj:`float`, optional): The intensity of the distance fog
saturation_limit (:obj:`float` or :obj:`List[float]`, optional): The minimum saturation to avoid "gray soup". A list when visualizing multiple series
categorical (:obj:`bool` or :obj:`List[bool]`, optional): Whether this scatter layer is categorical. A list when visualizing multiple series
interactive (:obj:`bool`, optional): Whether this scatter layer is interactive
has_legend (:obj:`bool`, optional): Whether or not to draw a legend
legend_title (:obj:`str` or :obj:`List[str]`, optional): The title of the legend. A list when visualizing multiple series
legend_labels (:obj:`Dict` or :obj:`List[Dict]`, optional): A dict mapping values to legend labels. A list when visualizing multiple series
            min_legend_label (:obj:`str`, :obj:`float`, :obj:`List[str]` or :obj:`List[float]`, optional): The label used for the minimum value in a ranged (non-categorical) legend. A list when visualizing multiple series
max_legend_label (:obj:`str`, :obj:`float`, :obj:`List[str]` or :obj:`List[float]`, optional): The label used for the maximum value in a ranged (non-categorical) legend. A list when visualizing multiple series
series_title (:obj:`str` or :obj:`List[str]`, optional): The name of the series (used when multiple properites supplied). A list when visualizing multiple series
ondblclick (:obj:`str` or :obj:`List[str]`, optional): A JavaScript snippet that is executed on double-clicking on a data point. A list when visualizing multiple series
selected_labels: (:obj:`Dict` or :obj:`List[Dict]`, optional): A list of label values to show in the selected box. A list when visualizing multiple series
label_index: (:obj:`int` or :obj:`List[int]`, optional): The index of the label value to use as the actual label (when __ is used to specify multiple values). A list when visualizing multiple series
title_index: (:obj:`int` or :obj:`List[int]`, optional): The index of the label value to use as the selected title (when __ is used to specify multiple values). A list when visualizing multiple series
"""
if mapping["z"] not in data:
data[mapping["z"]] = [0] * len(data[mapping["x"]])
if "pandas" in type(data).__module__:
data = data.to_dict("list")
data_c = data[mapping["c"]]
data_cs = data[mapping["c"]] if mapping["cs"] in data else None
data_s = data[mapping["s"]] if mapping["s"] in data else None
# Check whether the color ("c") are strings
if type(data_c[0]) is str:
raise ValueError('Strings are not valid values for "c".')
# In case there are multiple series defined
n_series = 1
if isinstance(data_c[0], Iterable):
n_series = len(data_c)
else:
data_c = [data_c]
if data_cs is not None and not isinstance(data_cs[0], Iterable):
data_cs = [data_cs]
if data_s is not None and not isinstance(data_s[0], Iterable):
data_s = [data_s]
# Make everything a list that isn't one (or a tuple)
colormap = Faerun.make_list(colormap)
saturation_limit = Faerun.make_list(saturation_limit)
categorical = Faerun.make_list(categorical)
legend_title = Faerun.make_list(legend_title)
legend_labels = Faerun.make_list(legend_labels, make_list_list=True)
min_legend_label = Faerun.make_list(min_legend_label)
max_legend_label = Faerun.make_list(max_legend_label)
series_title = Faerun.make_list(series_title)
ondblclick = Faerun.make_list(ondblclick)
selected_labels = Faerun.make_list(selected_labels, make_list_list=True)
label_index = Faerun.make_list(label_index)
title_index = Faerun.make_list(title_index)
# If any argument list is shorter than the number of series,
# repeat the last element
colormap = Faerun.expand_list(colormap, n_series)
saturation_limit = Faerun.expand_list(saturation_limit, n_series)
categorical = Faerun.expand_list(categorical, n_series)
legend_title = Faerun.expand_list(legend_title, n_series, with_none=True)
legend_labels = Faerun.expand_list(legend_labels, n_series, with_none=True)
min_legend_label = Faerun.expand_list(
min_legend_label, n_series, with_none=True
)
max_legend_label = Faerun.expand_list(
max_legend_label, n_series, with_none=True
)
series_title = Faerun.expand_list(series_title, n_series, with_value="Series")
ondblclick = Faerun.expand_list(ondblclick, n_series, with_none=True)
selected_labels = Faerun.expand_list(selected_labels, n_series)
label_index = Faerun.expand_list(label_index, n_series)
title_index = Faerun.expand_list(title_index, n_series)
# # The c and cs values in the data are a special case, as they should
# # never be expanded
# if type(data[mapping["c"]][0]) is not list and prop_len > 1:
# prop_len = 1
# elif:
# prop_len = len(data[mapping["c"]])
legend = [None] * n_series
is_range = [None] * n_series
min_c = [None] * n_series
max_c = [None] * n_series
for s in range(n_series):
min_c[s] = float(min(data_c[s]))
max_c[s] = float(max(data_c[s]))
len_c = len(data_c[s])
if min_legend_label[s] is None:
min_legend_label[s] = min_c[s]
if max_legend_label[s] is None:
max_legend_label[s] = max_c[s]
is_range[s] = False
if legend_title[s] is None:
legend_title[s] = name
# Prepare the legend
legend[s] = []
if has_legend:
legend_values = []
if categorical[s]:
if legend_labels[s]:
legend_values = legend_labels[s]
else:
legend_values = [(i, str(i)) for i in sorted(set(data_c[s]))]
else:
if legend_labels[s]:
legend_labels[s].reverse()
for value, label in legend_labels[s]:
legend_values.append(
[(value - min_c[s]) / (max_c[s] - min_c[s]), label]
)
else:
is_range[s] = True
for i, val in enumerate(np.linspace(1.0, 0.0, 99)):
legend_values.append(
[val, str(data_c[s][int(math.floor(len_c / 100 * i))])]
)
cmap = None
if isinstance(colormap[s], str):
cmap = plt.cm.get_cmap(colormap[s])
else:
cmap = colormap[s]
for value, label in legend_values:
legend[s].append([list(cmap(value)), label])
# Normalize the data to later get the correct colour maps
if not categorical[s]:
data_c[s] = np.array(data_c[s])
data_c[s] = (data_c[s] - min_c[s]) / (max_c[s] - min_c[s])
if mapping["cs"] in data and len(data_cs) > s:
data_cs[s] = np.array(data_cs[s])
min_cs = min(data_cs[s])
max_cs = max(data_cs[s])
# Avoid zero saturation by limiting the lower bound to 0.1
data_cs[s] = 1.0 - np.maximum(
saturation_limit[s],
np.array((data_cs[s] - min_cs) / (max_cs - min_cs)),
)
# Format numbers if parameters are indeed numbers
if isinstance(min_legend_label[s], (int, float)):
min_legend_label[s] = self.legend_number_format.format(
min_legend_label[s]
)
if isinstance(max_legend_label[s], (int, float)):
max_legend_label[s] = self.legend_number_format.format(
max_legend_label[s]
)
data[mapping["c"]] = data_c
if data_cs:
data[mapping["cs"]] = data_cs
if data_s:
data[mapping["s"]] = data_s
self.scatters[name] = {
"name": name,
"shader": shader,
"point_scale": point_scale,
"max_point_size": max_point_size,
"fog_intensity": fog_intensity,
"interactive": interactive,
"categorical": categorical,
"mapping": mapping,
"colormap": colormap,
"has_legend": has_legend,
"legend_title": legend_title,
"legend": legend,
"is_range": is_range,
"min_c": min_c,
"max_c": max_c,
"min_legend_label": min_legend_label,
"max_legend_label": max_legend_label,
"series_title": series_title,
"ondblclick": ondblclick,
"selected_labels": selected_labels,
"label_index": label_index,
"title_index": title_index,
}
self.scatters_data[name] = data
def plot(
self,
file_name: str = "index",
path: str = "./",
template: str = "default",
notebook_height: int = 500,
):
"""Plots the data to an HTML / JS file.
Keyword Arguments:
file_name (:obj:`str`, optional): The name of the HTML / JS file
path (:obj:`str`, optional): The path to which to write the HTML / JS file
template (:obj:`str`, optional): The name or path of the template to use
notebook_height: (:obj`int`, optional): The height of the plot when displayed in a jupyter notebook
"""
self.notebook_height = notebook_height
script_path = os.path.dirname(os.path.abspath(__file__))
if template in ["default", "reaction_smiles", "smiles", "url_image"]:
template = "template_" + template + ".j2"
else:
script_path = os.path.dirname(template)
html_path = os.path.join(path, file_name + ".html")
js_path = os.path.join(path, file_name + ".js")
jenv = jinja2.Environment(loader=jinja2.FileSystemLoader(script_path))
has_legend = False
for _, value in self.scatters.items():
if value["has_legend"]:
has_legend = True
break
if not self.show_legend:
has_legend = False
# Drop colormaps before passing them to the document, as they are
# not JSON serializable.
trees_copy = copy.deepcopy(self.trees)
scatters_copy = copy.deepcopy(self.scatters)
for key, _ in trees_copy.items():
del trees_copy[key]["colormap"]
for key, _ in scatters_copy.items():
del scatters_copy[key]["colormap"]
model = {
"title": self.title,
"file_name": file_name + ".js",
"clear_color": self.clear_color,
"view": self.view,
"coords": str(self.coords).lower(),
"coords_color": self.coords_color,
"coords_box": str(self.coords_box).lower(),
"coords_ticks": str(self.coords_ticks).lower(),
"coords_grid": str(self.coords_grid).lower(),
"coords_tick_count": self.coords_tick_count,
"coords_tick_length": self.coords_tick_length,
"coords_offset": self.coords_offset,
"x_title": self.x_title,
"y_title": self.y_title,
"tree_helpers": list(trees_copy.values()),
"point_helpers": list(scatters_copy.values()),
"has_legend": str(has_legend).lower(),
"legend_title": self.legend_title,
"legend_orientation": self.legend_orientation,
"alpha_blending": str(self.alpha_blending).lower(),
"anti_aliasing": str(self.anti_aliasing).lower(),
"style": self.style,
"impress": self.impress,
"in_notebook": Faerun.in_notebook(),
"thumbnail_width": self.thumbnail_width,
}
if Faerun.in_notebook():
model["data"] = self.create_data()
else:
with open(js_path, "w") as f:
f.write(self.create_data())
output_text = jenv.get_template(template).render(model)
with open(html_path, "w") as result_file:
result_file.write(output_text)
if Faerun.in_notebook():
display(IFrame(html_path, width="100%", height=self.notebook_height))
display(FileLink(html_path))
def get_min_max(self) -> tuple:
""" Get the minimum an maximum coordinates from this plotter instance
Returns:
:obj:`tuple`: The minimum and maximum coordinates
"""
minimum = float("inf")
maximum = float("-inf")
for name, data in self.scatters_data.items():
mapping = self.scatters[name]["mapping"]
min_x = float("inf")
min_y = float("inf")
min_z = float("inf")
max_x = float("-inf")
max_y = float("-inf")
max_z = float("-inf")
if mapping["x"] in data:
min_x = min(data[mapping["x"]])
max_x = max(data[mapping["x"]])
if mapping["y"] in data:
min_y = min(data[mapping["y"]])
max_y = max(data[mapping["y"]])
if mapping["z"] in data:
min_z = min(data[mapping["z"]])
max_z = max(data[mapping["z"]])
minimum = min(minimum, min([min_x, min_y, min_z]))
maximum = max(maximum, max([max_x, max_y, max_z]))
for name, data in self.trees_data.items():
if self.trees[name]["point_helper"] is None:
mapping = self.trees[name]["mapping"]
min_x = float("inf")
min_y = float("inf")
min_z = float("inf")
max_x = float("-inf")
max_y = float("-inf")
max_z = float("-inf")
if mapping["x"] in data:
min_x = min(data[mapping["x"]])
max_x = max(data[mapping["x"]])
if mapping["y"] in data:
min_y = min(data[mapping["y"]])
max_y = max(data[mapping["y"]])
if mapping["z"] in data:
min_z = min(data[mapping["z"]])
max_z = max(data[mapping["z"]])
minimum = min(minimum, min([min_x, min_y, min_z]))
maximum = max(maximum, max([max_x, max_y, max_z]))
return minimum, maximum
def create_python_data(self) -> dict:
"""Returns a Python dict containing the data
Returns:
:obj:`dict`: The data defined in this Faerun instance
"""
s = self.scale
minimum, maximum = self.get_min_max()
diff = maximum - minimum
output = {}
# Create the data for the scatters
for name, data in self.scatters_data.items():
mapping = self.scatters[name]["mapping"]
colormaps = self.scatters[name]["colormap"]
cmaps = [None] * len(colormaps)
for i, colormap in enumerate(colormaps):
if isinstance(colormap, str):
cmaps[i] = plt.cm.get_cmap(colormap)
else:
cmaps[i] = colormap
output[name] = {}
output[name]["meta"] = self.scatters[name]
output[name]["type"] = "scatter"
output[name]["x"] = np.array(
[s * (x - minimum) / diff for x in data[mapping["x"]]], dtype=np.float32
)
output[name]["y"] = np.array(
[s * (y - minimum) / diff for y in data[mapping["y"]]], dtype=np.float32
)
output[name]["z"] = np.array(
[s * (z - minimum) / diff for z in data[mapping["z"]]], dtype=np.float32
)
if mapping["labels"] in data:
# Make sure that the labels are always strings
output[name]["labels"] = list(map(str, data[mapping["labels"]]))
if mapping["s"] in data:
output[name]["s"] = np.array(data[mapping["s"]], dtype=np.float32)
output[name]["colors"] = [{}] * len(data[mapping["c"]])
for s in range(len(data[mapping["c"]])):
if mapping["cs"] in data:
colors = np.array([cmaps[s](x) for x in data[mapping["c"]][s]])
for i, c in enumerate(colors):
hsl = np.array(colour.rgb2hsl(c[:3]))
hsl[1] = hsl[1] - hsl[1] * data[mapping["cs"]][s][i]
colors[i] = np.append(np.array(colour.hsl2rgb(hsl)), 1.0)
colors = np.round(colors * 255.0)
output[name]["colors"][s]["r"] = np.array(
colors[:, 0], dtype=np.float32
)
output[name]["colors"][s]["g"] = np.array(
colors[:, 1], dtype=np.float32
)
output[name]["colors"][s]["b"] = np.array(
colors[:, 2], dtype=np.float32
)
else:
colors = np.array([cmaps[s](x) for x in data[mapping["c"]][s]])
colors = np.round(colors * 255.0)
output[name]["colors"][s]["r"] = np.array(
colors[:, 0], dtype=np.float32
)
output[name]["colors"][s]["g"] = np.array(
colors[:, 1], dtype=np.float32
)
output[name]["colors"][s]["b"] = np.array(
colors[:, 2], dtype=np.float32
)
for name, data in self.trees_data.items():
mapping = self.trees[name]["mapping"]
point_helper = self.trees[name]["point_helper"]
output[name] = {}
output[name]["meta"] = self.trees[name]
output[name]["type"] = "tree"
if point_helper is not None and point_helper in self.scatters_data:
scatter = self.scatters_data[point_helper]
scatter_mapping = self.scatters[point_helper]["mapping"]
x_t = []
y_t = []
z_t = []
for i in range(len(data[mapping["from"]])):
x_t.append(scatter[scatter_mapping["x"]][data[mapping["from"]][i]])
x_t.append(scatter[scatter_mapping["x"]][data[mapping["to"]][i]])
y_t.append(scatter[scatter_mapping["y"]][data[mapping["from"]][i]])
y_t.append(scatter[scatter_mapping["y"]][data[mapping["to"]][i]])
z_t.append(scatter[scatter_mapping["z"]][data[mapping["from"]][i]])
z_t.append(scatter[scatter_mapping["z"]][data[mapping["to"]][i]])
output[name]["x"] = np.array(
[s * (x - minimum) / diff for x in x_t], dtype=np.float32
)
output[name]["y"] = np.array(
[s * (y - minimum) / diff for y in y_t], dtype=np.float32
)
output[name]["z"] = np.array(
[s * (z - minimum) / diff for z in z_t], dtype=np.float32
)
else:
output[name]["x"] = np.array(
[s * (x - minimum) / diff for x in data[mapping["x"]]],
dtype=np.float32,
)
output[name]["y"] = np.array(
[s * (y - minimum) / diff for y in data[mapping["y"]]],
dtype=np.float32,
)
output[name]["z"] = np.array(
[s * (z - minimum) / diff for z in data[mapping["z"]]],
dtype=np.float32,
)
if mapping["c"] in data:
colormap = self.trees[name]["colormap"]
cmap = None
if isinstance(colormap, str):
cmap = plt.cm.get_cmap(colormap)
else:
cmap = colormap
colors = np.array([cmap(x) for x in data[mapping["c"]]])
colors = np.round(colors * 255.0)
output[name]["r"] = np.array(colors[:, 0], dtype=np.float32)
output[name]["g"] = | np.array(colors[:, 1], dtype=np.float32) | numpy.array |
import numpy as np
import os
import sys
import pandas as pd
import zipfile
import argparse
from tqdm import tqdm
from utils import *
from sklearn.model_selection import train_test_split
import h5py
np.random.seed(0)
def dataset_split(all_images, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
X_train, X_test = train_test_split(all_images, test_size=0.33, random_state=0)
X_train = np.asarray(X_train)
X_test = np.asarray(X_test)
print(X_train.shape, X_test.shape)
np.save(os.path.join(output_dir, 'train_ids.npy'), X_train)
np.save(os.path.join(output_dir, 'test_ids.npy'), X_test)
def find_single_attribute_ind(categories, attribute):
# attribute: Target attribute for binary classification
index = np.where(np.asarray(categories) == attribute)
index = index[0][0]
return index
def find_attribute_index(categories, attribute):
# attribute: Target attribute for binary classification
index_main = []
for a in attribute:
print(a)
index = np.where(np.asarray(categories) == a)
index = index[0][0]
index_main.append(index)
print(index_main)
return index_main
def save_processed_label_file(df, output_dir, attribute):
file_name = ''.join(attribute) + '_binary_classification.txt'
df.to_csv(os.path.join(output_dir, file_name), sep=' ', index=None, header=None)
print(df.shape)
one_line = str(df.shape[0]) + '\n'
second_line = ''.join(attribute) + "\n"
with open(os.path.join(output_dir, file_name), 'r+') as fp:
lines = fp.readlines() # lines is list of line, each element '...\n'
lines.insert(0, one_line) # you can use any index if you know the line index
lines.insert(1, second_line)
fp.seek(0) # file pointer locates at the beginning to write the whole file again
fp.writelines(lines)
# Write the label file for target attribute binary classification
def write_attribute_label_file(df, categories, attribute, output_dir):
index_main = find_attribute_index(categories, attribute)
# Train File
df_temp = df[['Image_Path'] + index_main]
save_processed_label_file(df_temp, output_dir, attribute)
# Read saved files
def read_saved_files(attribute, output_dir, image_dir):
file_name = ''.join(attribute) + '_binary_classification.txt'
categories, file_names_dict = read_data_file(os.path.join(output_dir, file_name), image_dir)
categories = np.asarray(categories).ravel()
print(categories)
print("Number of images: ", len(file_names_dict.keys()))
print("Few image names:")
    print(list(file_names_dict.keys())[0:5])
label = file_names_dict[list(file_names_dict.keys())[0]]
print(type(label))
label = np.asarray(label)
print(label.ravel())
def prep_celeba(attributes=[['Smiling'], ['Young'], ['No_Beard'], ['Heavy_Makeup'], ['Black_Hair'], ['Bangs']]):
# final paths
celebA_dir = os.path.join('data', 'CelebA')
image_dir = os.path.join(celebA_dir, 'images')
txt_dir = os.path.join(celebA_dir, 'list_attr_celeba.txt')
print('Image Dir: ', image_dir)
print('Label File: ', txt_dir)
# Divide dataset into train and test set
all_images = os.listdir(image_dir)
dataset_split(all_images, celebA_dir)
# Read Label File
categories, file_names_dict = read_data_file(txt_dir)
categories = np.asarray(categories).ravel()
print(categories)
print("Number of images: ", len(file_names_dict.keys()))
label = file_names_dict[list(file_names_dict.keys())[0]]
print(type(label))
label = np.asarray(label)
print(label.ravel())
# Create Binary-Classification Data file
# Convert the dictionary: attr_list to a dataframe
df = pd.DataFrame(file_names_dict).T
df['Image_Path'] = df.index
for attribute in attributes:
write_attribute_label_file(df, categories, attribute, celebA_dir)
for attribute in attributes:
read_saved_files(attribute, celebA_dir, image_dir)
def prep_celeba_biased():
attribute = 'Smiling'
# Attribute is Smiling
# however, confounded with Young and Blond.
# Meaning that positive examples are also Young and Blond
# And negative examples are old and dark haired
# final paths
celebA_dir = os.path.join('data', 'CelebA')
image_dir = os.path.join(celebA_dir, 'images')
txt_dir = os.path.join(celebA_dir, 'list_attr_celeba.txt')
biased_celebA_dir = os.path.join(celebA_dir, 'biased')
if not os.path.exists(biased_celebA_dir):
os.makedirs(biased_celebA_dir)
print('Image Dir: ', image_dir)
print('Label File: ', txt_dir)
# Read Label File
categories, all_file_names_dict = read_data_file(txt_dir)
categories = np.asarray(categories).ravel()
file_names_dict = {}
for img in all_file_names_dict.keys():
smiling = all_file_names_dict[img][find_single_attribute_ind(categories, 'Smiling')]
young = all_file_names_dict[img][find_single_attribute_ind(categories, 'Young')]
blond = all_file_names_dict[img][find_single_attribute_ind(categories, 'Blond_Hair')]
if smiling == young and smiling == blond:
file_names_dict.update({img: all_file_names_dict[img]})
print(categories)
# Divide dataset into train and test set
all_images = list(file_names_dict.keys())
dataset_split(all_images, biased_celebA_dir)
print("Number of images: ", len(file_names_dict.keys()))
label = file_names_dict[list(file_names_dict.keys())[0]]
print(type(label))
label = np.asarray(label)
print(label.ravel())
# Create Binary-Classification Data file
# Convert the dictionary: attr_list to a dataframe
df = pd.DataFrame(file_names_dict).T
df['Image_Path'] = df.index
write_attribute_label_file(df, categories, [attribute], biased_celebA_dir)
def prep_celeba_biased_or():
attribute = 'Smiling'
# Attribute is Smiling
# however, confounded with Young and Blond.
# Meaning that positive examples are either smile + blond or smile+young
# And negative examples are not smiling + old + dark haired
# final paths
celebA_dir = os.path.join('data', 'CelebA')
image_dir = os.path.join(celebA_dir, 'images')
txt_dir = os.path.join(celebA_dir, 'list_attr_celeba.txt')
biased_celebA_dir = os.path.join(celebA_dir, 'biased_or')
if not os.path.exists(biased_celebA_dir):
os.makedirs(biased_celebA_dir)
print('Image Dir: ', image_dir)
print('Label File: ', txt_dir)
# Read Label File
categories, all_file_names_dict = read_data_file(txt_dir)
categories = np.asarray(categories).ravel()
file_names_dict = {}
for img in all_file_names_dict.keys():
smiling = all_file_names_dict[img][find_single_attribute_ind(categories, 'Smiling')]
bangs = all_file_names_dict[img][find_single_attribute_ind(categories, 'Bangs')]
blond = all_file_names_dict[img][find_single_attribute_ind(categories, 'Blond_Hair')]
if smiling == 1:
if bangs == 1 or blond == 1:
file_names_dict.update({img: all_file_names_dict[img]})
else:
if bangs == -1 and blond == -1:
if np.random.uniform() < 0.33:
file_names_dict.update({img: all_file_names_dict[img]})
print(categories)
# Divide dataset into train and test set
all_images = list(file_names_dict.keys())
dataset_split(all_images, biased_celebA_dir)
print("Number of images: ", len(file_names_dict.keys()))
label = file_names_dict[list(file_names_dict.keys())[0]]
print(type(label))
    label = np.asarray(label)
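# --- Illustrative sketch (standalone, not part of the script above) ---------
# A toy version of the selection rule used in prep_celeba_biased_or: keep all
# smiling images that also have bangs or blond hair, and keep roughly a third
# of the images that are non-smiling, non-bangs and non-blond. The +1 / -1
# values mirror the CelebA attribute encoding assumed above; the file names
# and labels here are made up.
import numpy as np

toy_labels = {
    "img_a.jpg": {"Smiling": 1, "Bangs": 1, "Blond_Hair": -1},    # kept
    "img_b.jpg": {"Smiling": 1, "Bangs": -1, "Blond_Hair": -1},   # dropped
    "img_c.jpg": {"Smiling": -1, "Bangs": -1, "Blond_Hair": -1},  # kept ~1/3 of the time
}
kept = {}
for img, attrs in toy_labels.items():
    if attrs["Smiling"] == 1:
        if attrs["Bangs"] == 1 or attrs["Blond_Hair"] == 1:
            kept[img] = attrs
    elif attrs["Bangs"] == -1 and attrs["Blond_Hair"] == -1:
        if np.random.uniform() < 0.33:
            kept[img] = attrs
print(sorted(kept))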
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import os, argparse
import csv
from run1 import get_params_office_world, get_params_traffic_world, get_params_craft_world
def smooth(y, box_pts):
    # Moving-average smoothing with a box kernel of width box_pts.
    # Five extrapolated points (each the mean of the current last five values)
    # are appended to y, but the convolution below is taken over the original
    # length only; the last five smoothed samples are then clamped to the
    # sixth-to-last value to mask the edge artefact of 'same'-mode convolution.
    box = np.ones(box_pts) / box_pts
    for _ in range(5):
        y.append(sum(y[-5:]) / len(y[-5:]))
    y_smooth = np.convolve(y[0:-5], box, mode='same')
    y_smooth[-5:] = y_smooth[-6]
    return y_smooth
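# Illustrative helper (defined but never called): shows the effect of smooth()
# on a short synthetic reward curve. Purely for documentation of the function
# above; the numbers are made up.
def _smooth_demo():
    raw = [0.0, 0.1, 0.0, 0.3, 0.5, 0.4, 0.7, 0.8, 0.9, 1.0]
    smoothed = smooth(list(raw), 5)  # pass a copy: smooth() appends to its input
    print("raw     :", raw)
    print("smoothed:", np.round(smoothed, 2).tolist())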
def export_results_traffic_world(task_id, algorithm):
files = os.listdir("../plotdata/")
step_unit = get_params_traffic_world('../experiments/traffic/tests/ground_truth.txt')[0].num_steps
max_step = get_params_traffic_world('../experiments/traffic/tests/ground_truth.txt')[3].total_steps
    steps = np.linspace(0, max_step, int(max_step / step_unit) + 1, endpoint=True)
if task_id>0:
p25 = [0]
p50 = [0]
p75 = [0]
p25s = [0]
p50s = [0]
p75s = [0]
p25_q = [0]
p50_q = [0]
p75_q = [0]
p25_hrl = [0]
p50_hrl = [0]
p75_hrl = [0]
p25_dqn = [0]
p50_dqn = [0]
p75_dqn = [0]
files_of_interest = list()
for file in files:
if (("traffic" in file) and (".csv" in file) and (str(task_id) in file)):
files_of_interest.append(file)
for file in files_of_interest:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_q.append(np.percentile(row, 25))
p50_q.append(np.percentile(row, 50))
p75_q.append(np.percentile(row, 75))
elif 'hrl' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_hrl.append(np.percentile(row, 25))
p50_hrl.append(np.percentile(row, 50))
p75_hrl.append(np.percentile(row, 75))
elif 'dqn' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_dqn.append(np.percentile(row, 25))
p50_dqn.append(np.percentile(row, 50))
p75_dqn.append(np.percentile(row, 75))
elif 'rpni' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25.append(np.percentile(row, 25))
p50.append(np.percentile(row, 50))
p75.append(np.percentile(row, 75))
elif 'sat' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25s.append(np.percentile(row, 25))
p50s.append(np.percentile(row, 50))
p75s.append(np.percentile(row, 75))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25)-1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25)-1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s)-1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q)-1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl)-1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn)-1) * step_unit, len(p25_dqn), endpoint=True)
plt.xlim(0, (len(p25_dqn)-1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
ax.grid()
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
if algorithm == "all":
plt.xlim(0,max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '', '', 'D-DQN','','','HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.8), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
else:
step = 0
p25dict = dict()
p50dict = dict()
p75dict = dict()
p25sdict = dict()
p50sdict = dict()
p75sdict = dict()
p25_qdict = dict()
p50_qdict = dict()
p75_qdict = dict()
p25_hrldict = dict()
p50_hrldict = dict()
p75_hrldict = dict()
p25_dqndict = dict()
p50_dqndict = dict()
p75_dqndict = dict()
p25 = list()
p50 = list()
p75 = list()
p25s = list()
p50s = list()
p75s = list()
p25_q = list()
p50_q = list()
p75_q = list()
p25_hrl = list()
p50_hrl = list()
p75_hrl = list()
p25_dqn = list()
p50_dqn = list()
p75_dqn = list()
p25dict[0] = [0,0,0,0]
p50dict[0] = [0,0,0,0]
p75dict[0] = [0,0,0,0]
p25sdict[0] = [0,0,0,0]
p50sdict[0] = [0,0,0,0]
p75sdict[0] = [0,0,0,0]
p25_qdict[0] = [0,0,0,0]
p50_qdict[0] = [0,0,0,0]
p75_qdict[0] = [0,0,0,0]
p25_hrldict[0] = [0,0,0,0]
p50_hrldict[0] = [0,0,0,0]
p75_hrldict[0] = [0,0,0,0]
p25_dqndict[0] = [0,0,0,0]
p50_dqndict[0] = [0,0,0,0]
p75_dqndict[0] = [0,0,0,0]
files_dict = dict()
for file in files:
if (("traffic" in file) and (".csv" in file)):
if "1" in file:
task = 1
if "2" in file:
task = 2
if "3" in file:
task = 3
if "4" in file:
task = 4
if task not in files_dict:
files_dict[task] = [file]
else:
files_dict[task].append(file)
for task in files_dict:
for file in files_dict[task]:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_qdict:
p25_qdict[step].append(np.percentile(row, 25))
p50_qdict[step].append(np.percentile(row, 50))
p75_qdict[step].append(np.percentile(row, 75))
else:
p25_qdict[step] = [np.percentile(row, 25)]
p50_qdict[step] = [np.percentile(row, 50)]
p75_qdict[step] = [np.percentile(row, 75)]
elif 'hrl' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_hrldict:
p25_hrldict[step].append(np.percentile(row, 25))
p50_hrldict[step].append(np.percentile(row, 50))
p75_hrldict[step].append(np.percentile(row, 75))
else:
p25_hrldict[step] = [np.percentile(row, 25)]
p50_hrldict[step] = [np.percentile(row, 50)]
p75_hrldict[step] = [np.percentile(row, 75)]
elif 'dqn' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_dqndict:
p25_dqndict[step].append(np.percentile(row, 25))
p50_dqndict[step].append(np.percentile(row, 50))
p75_dqndict[step].append(np.percentile(row, 75))
else:
p25_dqndict[step] = [np.percentile(row, 25)]
p50_dqndict[step] = [np.percentile(row, 50)]
p75_dqndict[step] = [np.percentile(row, 75)]
elif 'rpni' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25dict:
p25dict[step].append(np.percentile(row, 25))
p50dict[step].append(np.percentile(row, 50))
p75dict[step].append(np.percentile(row, 75))
else:
p25dict[step] = [np.percentile(row, 25)]
p50dict[step] = [np.percentile(row, 50)]
p75dict[step] = [np.percentile(row, 75)]
elif 'sat' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25sdict:
p25sdict[step].append(np.percentile(row, 25))
p50sdict[step].append(np.percentile(row, 50))
p75sdict[step].append(np.percentile(row, 75))
else:
p25sdict[step] = [np.percentile(row, 25)]
p50sdict[step] = [np.percentile(row, 50)]
p75sdict[step] = [np.percentile(row, 75)]
for step in steps:
if step in p25_qdict:
p25_q.append(sum(p25_qdict[step]) / len(p25_qdict[step]))
p50_q.append(sum(p50_qdict[step]) / len(p50_qdict[step]))
p75_q.append(sum(p75_qdict[step]) / len(p75_qdict[step]))
if step in p25_hrldict:
p25_hrl.append(sum(p25_hrldict[step]) / len(p25_hrldict[step]))
p50_hrl.append(sum(p50_hrldict[step]) / len(p50_hrldict[step]))
p75_hrl.append(sum(p75_hrldict[step]) / len(p75_hrldict[step]))
if step in p25dict:
p25.append(sum(p25dict[step]) / len(p25dict[step]))
p50.append(sum(p50dict[step]) / len(p50dict[step]))
p75.append(sum(p75dict[step]) / len(p75dict[step]))
if step in p25sdict:
p25s.append(sum(p25sdict[step]) / len(p25sdict[step]))
p50s.append(sum(p50sdict[step]) / len(p50sdict[step]))
p75s.append(sum(p75sdict[step]) / len(p75sdict[step]))
if step in p25_dqndict:
p25_dqn.append(sum(p25_dqndict[step]) / len(p25_dqndict[step]))
p50_dqn.append(sum(p50_dqndict[step]) / len(p50_dqndict[step]))
p75_dqn.append(sum(p75_dqndict[step]) / len(p75_dqndict[step]))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25) - 1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25) - 1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s) - 1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q) - 1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn) - 1) * step_unit, len(p25_dqn), endpoint=True)
            plt.xlim(0, (len(p25_dqn) - 1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl) - 1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
ax.grid()
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
plt.xlim(0, max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '','','D-DQN','', '', 'HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.8), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
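# Illustrative helper (defined but never called): the per-row statistic used
# by the export_results_* functions above — each CSV row holds the rewards of
# one evaluation round, summarised by its 25th/50th/75th percentiles. The row
# values are made up.
def _row_percentiles_demo():
    row = [0, 1, 1, 0, 1, 1, 1, 0]
    return (np.percentile(row, 25),
            np.percentile(row, 50),
            np.percentile(row, 75))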
def export_results_office_world(task_id, algorithm):
files = os.listdir("../plotdata/")
step_unit = get_params_office_world('../experiments/office/tests/ground_truth.txt')[0].num_steps
max_step = get_params_office_world('../experiments/office/tests/ground_truth.txt')[3].total_steps
    steps = np.linspace(0, max_step, int(max_step / step_unit) + 1, endpoint=True)
if task_id>0:
p25 = [0]
p50 = [0]
p75 = [0]
p25s = [0]
p50s = [0]
p75s = [0]
p25_q = [0]
p50_q = [0]
p75_q = [0]
p25_hrl = [0]
p50_hrl = [0]
p75_hrl = [0]
p25_dqn = [0]
p50_dqn = [0]
p75_dqn = [0]
files_of_interest = list()
for file in files:
if (("office" in file) and (".csv" in file) and (str(task_id) in file)):
files_of_interest.append(file)
for file in files_of_interest:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_q.append(np.percentile(row, 25))
p50_q.append(np.percentile(row, 50))
p75_q.append(np.percentile(row, 75))
elif 'hrl' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_hrl.append(np.percentile(row, 25))
p50_hrl.append(np.percentile(row, 50))
p75_hrl.append(np.percentile(row, 75))
elif 'dqn' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_dqn.append(np.percentile(row, 25))
p50_dqn.append(np.percentile(row, 50))
p75_dqn.append(np.percentile(row, 75))
elif 'rpni' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25.append(np.percentile(row, 25))
p50.append(np.percentile(row, 50))
p75.append(np.percentile(row, 75))
elif 'sat' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25s.append(np.percentile(row, 25))
p50s.append(np.percentile(row, 50))
p75s.append(np.percentile(row, 75))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25)-1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25)-1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s)-1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q)-1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl)-1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn)-1) * step_unit, len(p25_dqn), endpoint=True)
plt.xlim(0, (len(p25_dqn)-1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
ax.grid()
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
plt.xlim(0, max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '', '','D-DQN','','', 'HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.8), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
else:
step = 0
p25dict = dict()
p50dict = dict()
p75dict = dict()
p25sdict = dict()
p50sdict = dict()
p75sdict = dict()
p25_qdict = dict()
p50_qdict = dict()
p75_qdict = dict()
p25_hrldict = dict()
p50_hrldict = dict()
p75_hrldict = dict()
p25_dqndict = dict()
p50_dqndict = dict()
p75_dqndict = dict()
p25 = list()
p50 = list()
p75 = list()
p25s = list()
p50s = list()
p75s = list()
p25_q = list()
p50_q = list()
p75_q = list()
p25_hrl = list()
p50_hrl = list()
p75_hrl = list()
p25_dqn = list()
p50_dqn = list()
p75_dqn = list()
p25dict[0] = [0,0,0,0]
p50dict[0] = [0,0,0,0]
p75dict[0] = [0,0,0,0]
p25sdict[0] = [0,0,0,0]
p50sdict[0] = [0,0,0,0]
p75sdict[0] = [0,0,0,0]
p25_qdict[0] = [0,0,0,0]
p50_qdict[0] = [0,0,0,0]
p75_qdict[0] = [0,0,0,0]
p25_hrldict[0] = [0,0,0,0]
p50_hrldict[0] = [0,0,0,0]
p75_hrldict[0] = [0,0,0,0]
p25_dqndict[0] = [0,0,0,0]
p50_dqndict[0] = [0,0,0,0]
p75_dqndict[0] = [0,0,0,0]
files_dict = dict()
for file in files:
if (("office" in file) and (".csv" in file)):
if "1" in file:
task = 1
if "2" in file:
task = 2
if "3" in file:
task = 3
if "4" in file:
task = 4
if task not in files_dict:
files_dict[task] = [file]
else:
files_dict[task].append(file)
for task in files_dict:
for file in files_dict[task]:
file_str = ("../plotdata/") + file
if 'qlearn' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_qdict:
p25_qdict[step].append(np.percentile(row, 25))
p50_qdict[step].append(np.percentile(row, 50))
p75_qdict[step].append(np.percentile(row, 75))
else:
p25_qdict[step] = [np.percentile(row, 25)]
p50_qdict[step] = [np.percentile(row, 50)]
p75_qdict[step] = [np.percentile(row, 75)]
elif 'hrl' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_hrldict:
p25_hrldict[step].append(np.percentile(row, 25))
p50_hrldict[step].append(np.percentile(row, 50))
p75_hrldict[step].append(np.percentile(row, 75))
else:
p25_hrldict[step] = [np.percentile(row, 25)]
p50_hrldict[step] = [np.percentile(row, 50)]
p75_hrldict[step] = [np.percentile(row, 75)]
elif 'dqn' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_dqndict:
p25_dqndict[step].append(np.percentile(row, 25))
p50_dqndict[step].append(np.percentile(row, 50))
p75_dqndict[step].append(np.percentile(row, 75))
else:
p25_dqndict[step] = [np.percentile(row, 25)]
p50_dqndict[step] = [np.percentile(row, 50)]
p75_dqndict[step] = [np.percentile(row, 75)]
elif 'rpni' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25dict:
p25dict[step].append(np.percentile(row, 25))
p50dict[step].append(np.percentile(row, 50))
p75dict[step].append(np.percentile(row, 75))
else:
p25dict[step] = [np.percentile(row, 25)]
p50dict[step] = [np.percentile(row, 50)]
p75dict[step] = [np.percentile(row, 75)]
elif 'sat' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25sdict:
p25sdict[step].append(np.percentile(row, 25))
p50sdict[step].append(np.percentile(row, 50))
p75sdict[step].append(np.percentile(row, 75))
else:
p25sdict[step] = [np.percentile(row, 25)]
p50sdict[step] = [np.percentile(row, 50)]
p75sdict[step] = [np.percentile(row, 75)]
for step in steps:
if step in p25_qdict:
p25_q.append(sum(p25_qdict[step])/len(p25_qdict[step]))
p50_q.append(sum(p50_qdict[step])/len(p50_qdict[step]))
p75_q.append(sum(p75_qdict[step])/len(p75_qdict[step]))
if step in p25_hrldict:
p25_hrl.append(sum(p25_hrldict[step])/len(p25_hrldict[step]))
p50_hrl.append(sum(p50_hrldict[step])/len(p50_hrldict[step]))
p75_hrl.append(sum(p75_hrldict[step])/len(p75_hrldict[step]))
if step in p25dict:
p25.append(sum(p25dict[step])/len(p25dict[step]))
p50.append(sum(p50dict[step])/len(p50dict[step]))
p75.append(sum(p75dict[step])/len(p75dict[step]))
if step in p25sdict:
p25s.append(sum(p25sdict[step])/len(p25sdict[step]))
p50s.append(sum(p50sdict[step])/len(p50sdict[step]))
p75s.append(sum(p75sdict[step])/len(p75sdict[step]))
if step in p25_dqndict:
p25_dqn.append(sum(p25_dqndict[step]) / len(p25_dqndict[step]))
p50_dqn.append(sum(p50_dqndict[step]) / len(p50_dqndict[step]))
p75_dqn.append(sum(p75_dqndict[step]) / len(p75_dqndict[step]))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25) - 1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25) - 1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s) - 1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn) - 1) * step_unit, len(p25_dqn), endpoint=True)
            plt.xlim(0, (len(p25_dqn) - 1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q) - 1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl) - 1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
ax.grid()
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
if algorithm == "all":
plt.xlim(0,max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '', '','D-DQN','','', 'HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.32), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
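# Note (illustrative): the smooth() helper used above is defined elsewhere in
# this project. A minimal trailing moving-average version that matches the way
# it is called here, smooth(values, window), could look like the sketch below;
# the real implementation may differ.
#
#     def smooth(values, window):
#         # Trailing moving average; returns a list of the same length.
#         out = []
#         for i in range(len(values)):
#             lo = max(0, i - window + 1)
#             out.append(sum(values[lo:i + 1]) / (i + 1 - lo))
#         return out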
def export_results_craft_world(task_id, algorithm):
files = os.listdir("../plotdata/")
step_unit = get_params_craft_world('../experiments/craft/tests/ground_truth.txt')[0].num_steps
max_step = get_params_craft_world('../experiments/craft/tests/ground_truth.txt')[3].total_steps
    steps = np.linspace(0, max_step, int(max_step / step_unit) + 1, endpoint=True)
if task_id>0:
p25 = [0]
p50 = [0]
p75 = [0]
p25s = [0]
p50s = [0]
p75s = [0]
p25_q = [0]
p50_q = [0]
p75_q = [0]
p25_hrl = [0]
p50_hrl = [0]
p75_hrl = [0]
p25_dqn = [0]
p50_dqn = [0]
p75_dqn = [0]
files_of_interest = list()
for file in files:
if (("craft" in file) and (".csv" in file) and (str(task_id) in file)):
files_of_interest.append(file)
for file in files_of_interest:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_q.append(np.percentile(row,25))
p50_q.append(np.percentile(row,50))
p75_q.append(np.percentile(row,75))
elif 'hrl' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_hrl.append(np.percentile(row,25))
p50_hrl.append(np.percentile(row,50))
p75_hrl.append(np.percentile(row,75))
elif 'dqn' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25_dqn.append(np.percentile(row, 25))
p50_dqn.append(np.percentile(row, 50))
p75_dqn.append(np.percentile(row, 75))
elif 'rpni' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25.append(np.percentile(row,25))
p50.append(np.percentile(row,50))
p75.append(np.percentile(row,75))
elif 'sat' in file:
with open(file_str) as csvfile:
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
p25s.append(np.percentile(row,25))
p50s.append(np.percentile(row,50))
p75s.append(np.percentile(row,75))
fig, ax = plt.subplots()
fig.set_figheight(6)
fig.set_figwidth(8)
if algorithm == "jirprpni" or algorithm == "all":
p25 = smooth(p25, 5)
p50 = smooth(p50, 5)
p75 = smooth(p75, 5)
steps = np.linspace(0, (len(p25)-1) * step_unit, len(p25), endpoint=True)
plt.xlim(0, (len(p25)-1) * step_unit)
ax.plot(steps, p25, alpha=0)
ax.plot(steps, p50, color='black', label='JIRP RPNI')
ax.plot(steps, p75, alpha=0)
plt.fill_between(steps, p50, p25, color='black', alpha=0.25)
plt.fill_between(steps, p50, p75, color='black', alpha=0.25)
if algorithm == "jirpsat" or algorithm == "all":
p25s = smooth(p25s, 5)
p50s = smooth(p50s, 5)
p75s = smooth(p75s, 5)
steps = np.linspace(0, (len(p25s)-1) * step_unit, len(p25s), endpoint=True)
plt.xlim(0, (len(p25s) - 1) * step_unit)
ax.plot(steps, p25s, alpha=0)
ax.plot(steps, p50s, color='green', label='JIRP SAT')
ax.plot(steps, p75s, alpha=0)
plt.fill_between(steps, p50s, p25s, color='green', alpha=0.25)
plt.fill_between(steps, p50s, p75s, color='green', alpha=0.25)
if algorithm == "qlearning" or algorithm == "all":
p25_q = smooth(p25_q, 5)
p50_q = smooth(p50_q, 5)
p75_q = smooth(p75_q, 5)
steps = np.linspace(0, (len(p25_q)-1) * step_unit, len(p25_q), endpoint=True)
plt.xlim(0, (len(p25_q) - 1) * step_unit)
ax.plot(steps, p25_q, alpha=0)
ax.plot(steps, p50_q, color='red', label='QAS')
ax.plot(steps, p75_q, alpha=0)
plt.fill_between(steps, p50_q, p25_q, color='red', alpha=0.25)
plt.fill_between(steps, p50_q, p75_q, color='red', alpha=0.25)
if algorithm == "hrl" or algorithm == "all":
p25_hrl = smooth(p25_hrl, 5)
p50_hrl = smooth(p50_hrl, 5)
p75_hrl = smooth(p75_hrl, 5)
steps = np.linspace(0, (len(p25_hrl)-1) * step_unit, len(p25_hrl), endpoint=True)
plt.xlim(0, (len(p25_hrl) - 1) * step_unit)
ax.plot(steps, p25_hrl, alpha=0)
ax.plot(steps, p50_hrl, color='blue', label='HRL')
ax.plot(steps, p75_hrl, alpha=0)
plt.fill_between(steps, p50_hrl, p25_hrl, color='blue', alpha=0.25)
plt.fill_between(steps, p50_hrl, p75_hrl, color='blue', alpha=0.25)
if algorithm == "ddqn" or algorithm == "all":
p25_dqn = smooth(p25_dqn, 5)
p50_dqn = smooth(p50_dqn, 5)
p75_dqn = smooth(p75_dqn, 5)
steps = np.linspace(0, (len(p25_dqn)-1) * step_unit, len(p25_dqn), endpoint=True)
plt.xlim(0, (len(p25_dqn)-1) * step_unit)
ax.plot(steps, p25_dqn, alpha=0)
ax.plot(steps, p50_dqn, color='purple', label='D-DQN')
ax.plot(steps, p75_dqn, alpha=0)
plt.fill_between(steps, p50_dqn, p25_dqn, color='purple', alpha=0.25)
plt.fill_between(steps, p50_dqn, p75_dqn, color='purple', alpha=0.25)
ax.grid()
if algorithm == "all":
plt.xlim(0,max_step)
ax.set_xlabel('number of training steps', fontsize=22)
ax.set_ylabel('reward', fontsize=22)
plt.ylim(-0.1, 1.1)
plt.xlim(0, max_step)
plt.locator_params(axis='x', nbins=5)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1])
plt.gcf().subplots_adjust(bottom=0.15)
plt.gca().legend(('', 'JIRP RPNI', '', '', 'JIRP SAT', '', '', 'QAS', '','','D-DQN','','', 'HRL', ''))
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.8), prop={'size': 14})
ax.tick_params(axis='both', which='major', labelsize=22)
plt.savefig('../plotdata/figure.png', dpi=600)
plt.show()
else:
step = 0
p25dict = dict()
p50dict = dict()
p75dict = dict()
p25sdict = dict()
p50sdict = dict()
p75sdict = dict()
p25_qdict = dict()
p50_qdict = dict()
p75_qdict = dict()
p25_hrldict = dict()
p50_hrldict = dict()
p75_hrldict = dict()
p25_dqndict = dict()
p50_dqndict = dict()
p75_dqndict = dict()
p25 = list()
p50 = list()
p75 = list()
p25s = list()
p50s = list()
p75s = list()
p25_q = list()
p50_q = list()
p75_q = list()
p25_hrl = list()
p50_hrl = list()
p75_hrl = list()
p25_dqn = list()
p50_dqn = list()
p75_dqn = list()
p25dict[0] = [0,0,0,0]
p50dict[0] = [0,0,0,0]
p75dict[0] = [0,0,0,0]
p25sdict[0] = [0,0,0,0]
p50sdict[0] = [0,0,0,0]
p75sdict[0] = [0,0,0,0]
p25_qdict[0] = [0,0,0,0]
p50_qdict[0] = [0,0,0,0]
p75_qdict[0] = [0,0,0,0]
p25_hrldict[0] = [0,0,0,0]
p50_hrldict[0] = [0,0,0,0]
p75_hrldict[0] = [0,0,0,0]
p25_dqndict[0] = [0,0,0,0]
p50_dqndict[0] = [0,0,0,0]
p75_dqndict[0] = [0,0,0,0]
files_dict = dict()
for file in files:
if (("craft" in file) and (".csv" in file)):
if "1" in file:
task = 1
if "2" in file:
task = 2
if "3" in file:
task = 3
if "4" in file:
task = 4
if task not in files_dict:
files_dict[task] = [file]
else:
files_dict[task].append(file)
for task in files_dict:
for file in files_dict[task]:
file_str = ("../plotdata/") + file
if 'qlearning' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_)>1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_qdict:
p25_qdict[step].append(np.percentile(row, 25))
p50_qdict[step].append(np.percentile(row, 50))
p75_qdict[step].append(np.percentile(row, 75))
else:
p25_qdict[step] = [np.percentile(row, 25)]
p50_qdict[step] = [np.percentile(row, 50)]
p75_qdict[step] = [np.percentile(row, 75)]
elif 'hrl' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_)>1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_hrldict:
p25_hrldict[step].append(np.percentile(row, 25))
p50_hrldict[step].append(np.percentile(row, 50))
p75_hrldict[step].append(np.percentile(row, 75))
else:
p25_hrldict[step] = [np.percentile(row, 25)]
p50_hrldict[step] = [np.percentile(row, 50)]
p75_hrldict[step] = [np.percentile(row, 75)]
elif 'dqn' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_) > 1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25_dqndict:
p25_dqndict[step].append(np.percentile(row, 25))
p50_dqndict[step].append(np.percentile(row, 50))
p75_dqndict[step].append(np.percentile(row, 75))
else:
p25_dqndict[step] = [np.percentile(row, 25)]
p50_dqndict[step] = [np.percentile(row, 50)]
p75_dqndict[step] = [np.percentile(row, 75)]
elif 'rpni' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_)>1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25dict:
p25dict[step].append(np.percentile(row, 25))
p50dict[step].append(np.percentile(row, 50))
p75dict[step].append(np.percentile(row, 75))
else:
p25dict[step] = [np.percentile(row, 25)]
p50dict[step] = [np.percentile(row, 50)]
p75dict[step] = [np.percentile(row, 75)]
elif 'sat' in file:
with open(file_str) as csvfile:
step = 0
readcsv = csv.reader(csvfile)
for row_ in readcsv:
if len(row_)>1:
row = list(map(int, row_))
else:
row = [float(row_[0])]
step += step_unit
if step in p25sdict:
p25sdict[step].append(np.percentile(row, 25))
p50sdict[step].append(np.percentile(row, 50))
p75sdict[step].append(np.percentile(row, 75))
else:
p25sdict[step] = [np.percentile(row, 25)]
p50sdict[step] = [np.percentile(row, 50)]
p75sdict[step] = [ | np.percentile(row, 75) | numpy.percentile |
# -*- coding: utf-8 -*-
"""
Module to manipulate, analyze and visualize structural geology data.
"""
from __future__ import division, print_function
from copy import deepcopy
import warnings
import pickle
import numpy as np
import matplotlib.pyplot as plt
from apsg.helpers import (
KentDistribution,
sind,
cosd,
acosd,
asind,
atand,
atan2d,
angle_metric,
l2v,
getldd,
_linear_inverse_kamb,
_square_inverse_kamb,
_schmidt_count,
_kamb_count,
_exponential_kamb,
)
__all__ = (
"Vec3",
"Lin",
"Fol",
"Pair",
"Fault",
"Group",
"PairSet",
"FaultSet",
"Cluster",
"StereoGrid",
"G",
"settings",
)
# Default module settings (singleton).
settings = dict(notation="dd", # Default notation for Fol dd or rhr
vec2dd=False, # Show Vec3 as plunge direction and plunge
                precision=1e-12, # Numerical precision for comparison
figsize=(8, 6)) # Default figure size
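# Illustrative usage of the module-level settings (a sketch; the import path
# assumes the standard apsg package layout):
#
#     >>> from apsg import settings, Vec3
#     >>> settings["vec2dd"] = True      # print vectors as direction/plunge
#     >>> Vec3(120, 60)                  # expected to show as V:120/60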
class Vec3(np.ndarray):
"""
    ``Vec3`` is a base class to store 3-dimensional vectors, derived from
    ``numpy.ndarray``, on which the ``Lin`` and ``Fol`` classes are based.
    ``Vec3`` supports most of the common vector algebra using the following operators
- ``+`` - vector addition
- ``-`` - vector subtraction
- ``*`` - dot product
- ``**`` - cross product
- ``abs`` - magnitude (length) of vector
Check following methods and properties for additional operations.
Args:
arr (array_like):
        Input data that is or can be converted to an array.
This includes lists, tuples, and ndarrays. When more than one
argument is passed (i.e. `inc` is not `None`) `arr` is interpreted
as dip direction of the vector in degrees.
inc (float):
`None` or dip of the vector in degrees.
mag (float):
The magnitude of the vector if `inc` is not `None`.
Returns:
``Vec3`` object
Example:
>>> v = Vec3([1, -2, 3])
>>> abs(v)
3.7416573867739413
# The dip direction and dip angle of vector with magnitude of 1 and 3.
>>> v = Vec3(120, 60)
>>> abs(v)
1.0
>>> v = Vec3(120, 60, 3)
>>> abs(v)
3.0
"""
def __new__(cls, arr, inc=None, mag=1.0):
if inc is None:
obj = np.asarray(arr).view(cls)
else:
obj = mag * Lin(arr, inc).view(cls)
return obj
def __repr__(self):
if settings["vec2dd"]:
result = "V:{:.0f}/{:.0f}".format(*self.dd)
else:
result = "V({:.3f}, {:.3f}, {:.3f})".format(*self)
return result
def __str__(self):
return repr(self)
def __mul__(self, other):
"""
Return the dot product of two vectors.
"""
return np.dot(self, other) # What about `numpy.inner`?
def __abs__(self):
"""
Return the 2-norm or Euclidean norm of vector.
"""
return np.linalg.norm(self)
def __pow__(self, other):
"""
Return cross product if argument is vector or power of vector.
"""
if np.isscalar(other):
return pow(abs(self), other)
else:
return self.cross(other)
def __eq__(self, other):
"""
Return `True` if vectors are equal, otherwise `False`.
"""
if not isinstance(other, self.__class__):
return False
return self is other or abs(self - other) < settings["precision"]
def __ne__(self, other):
"""
Return `True` if vectors are not equal, otherwise `False`.
Overrides the default implementation (unnecessary in Python 3).
"""
return not self == other
def __hash__(self):
return NotImplementedError
@classmethod
def rand(cls):
"""
Random unit vector from distribution on sphere
"""
return cls(np.random.randn(3)).uv
@property
def type(self):
"""
Return the type of ``self``.
"""
return type(self)
@property
def upper(self):
"""
Return `True` if z-coordinate is negative, otherwise `False`.
"""
return np.sign(self[2]) < 0
@property
def flip(self):
"""
Return a new vector with inverted `z` coordinate.
"""
return Vec3((self[0], self[1], -self[2]))
@property
def uv(self):
"""
Normalize the vector to unit length.
Returns:
unit vector of ``self``
Example:
>>> u = Vec3([1,1,1])
>>> u.uv
V(0.577, 0.577, 0.577)
"""
return self / abs(self)
def cross(self, other):
"""
Calculate the cross product of two vectors.
Args:
other: other ``Vec3`` vector
Returns:
The cross product of `self` and `other`.
Example:
>>> v = Vec3([1, 0, 0])
>>> u = Vec3([0, 0, 1])
>>> v.cross(u)
V(0.000, -1.000, 0.000)
"""
return Vec3(np.cross(self, other))
def angle(self, other):
"""
Calculate the angle between two vectors in degrees.
Args:
other: other ``Vec3`` vector
Returns:
The angle between `self` and `other` in degrees.
Example:
>>> v = Vec3([1, 0, 0])
>>> u = Vec3([0, 0, 1])
>>> v.angle(u)
90.0
"""
if isinstance(other, Group):
return other.angle(self)
else:
return acosd(np.clip(np.dot(self.uv, other.uv), -1, 1))
def rotate(self, axis, angle):
"""
Return rotated vector about axis.
Args:
axis (``Vec3``): axis of rotation
angle (float): angle of rotation in degrees
Returns:
          vector representation of `self` rotated `angle` degrees about
vector `axis`. Rotation is clockwise along axis direction.
Example:
# Rotate `e1` vector around `z` axis.
>>> u = Vec3([1, 0, 0])
>>> z = Vec3([0, 0, 1])
>>> u.rotate(z, 90)
V(0.000, 1.000, 0.000)
"""
e = Vec3(self) # rotate all types as vectors
k = axis.uv
r = cosd(angle) * e + sind(angle) * k.cross(e) + (1 - cosd(angle)) * k * (k * e)
return r.view(type(self))
def proj(self, other):
"""
Return projection of vector `u` onto vector `v`.
Args:
other (``Vec3``): other vector
Returns:
vector representation of `self` projected onto 'other'
Example:
>> u.proj(v)
Note:
To project on plane use: `u - u.proj(v)`, where `v` is plane normal.
"""
r = | np.dot(self, other) | numpy.dot |
import os
import sys
import imp
import argparse
import time
import math
import numpy as np
from utils import utils
from utils.imageprocessing import preprocess
from utils.dataset import Dataset
from network import Network
from evaluation.lfw import LFWTest
def main(args):
paths = [
r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0002.jpg',
r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0003.jpg',
r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\a-000013.jpg',
r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\rgb_2.jpg',
r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0002.jpg',
r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\rgb_2.jpg',
r'F:\data\face-recognition\lfw\lfw-112-mxnet\Abdoulaye_Wade\Abdoulaye_Wade_0003.jpg',
r'F:\data\face-recognition\realsense\data-labeled-clean-strict2-112-mxnet\rgb\001-chenkai\rgb_2.jpg',
]
print('%d images to load.' % len(paths))
assert(len(paths)>0)
# Load model files and config file
network = Network()
network.load_model(args.model_dir)
# network.config.preprocess_train = []
# network.config.preprocess_test = []
images = preprocess(paths, network.config, False)
import cv2
# images = np.array([cv2.resize(img, (96, 96)) for img in images])
# images = (images - 128.) / 128.
# images = images[..., ::-1]
print(images.shape)
# print(images[0,:5,:5,0])
# Run forward pass to calculate embeddings
mu, sigma_sq = network.extract_feature(images, args.batch_size, verbose=True)
print(mu.shape, sigma_sq.shape)
print('sigma_sq', np.max(sigma_sq), np.min(sigma_sq), np.mean(sigma_sq), np.exp(np.mean(np.log(sigma_sq))))
log_sigma_sq = np.log(sigma_sq)
print('log_sigma_sq', np.max(log_sigma_sq), | np.min(log_sigma_sq) | numpy.min |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Imaging
improve:
reinit, uncert,
rand_norm, rand_splitnorm, rand_pointing,
slice, slice_inv_sq, crop, rebin, groupixel
smooth, artifact, mask
Jy_per_pix_to_MJy_per_sr(improve):
header, image, wave
iuncert(improve):
unc
islice(improve):
image, wave, filenames, clean
icrop(improve):
header, image, wave
irebin(improve):
header, image, wave
igroupixel(improve):
header, image, wave
ismooth(improve):
header, image, wave
imontage(improve):
reproject, reproject_mc, coadd, clean
iswarp(improve):
footprint, combine, combine_mc, clean
iconvolve(improve):
spitzer_irs, choker, do_conv, image, wave,
filenames, clean
cupid(improve):
spec_build, sav_build,
header, image, wave
wmask, wclean, interfill, hextract, hswarp,
concatenate
"""
from tqdm import tqdm, trange
import os
import math
import numpy as np
from scipy.io import readsav
from scipy.interpolate import interp1d
from astropy import wcs
from astropy.io import ascii
from astropy.table import Table
from reproject import reproject_interp, reproject_exact, reproject_adaptive
from reproject.mosaicking import reproject_and_coadd
import subprocess as SP
import warnings
# warnings.filterwarnings("ignore", category=RuntimeWarning)
# warnings.filterwarnings("ignore", message="Skipping SYSTEM_VARIABLE record")
## Local
from utilities import InputError
from inout import (fitsext, csvext, ascext, fclean,
read_fits, write_fits, savext, write_hdf5,
# read_csv, write_csv, read_ascii,
)
from arrays import listize, closest, pix2sup, sup2pix
from maths import nanavg, bsplinterpol
from astrom import fixwcs, get_pc, pix2sr
##-----------------------------------------------
##
## <improve> based tools
##
##-----------------------------------------------
class improve:
'''
IMage PROcessing VEssel
'''
def __init__(self, filIN=None, header=None, image=None, wave=None,
wmod=0, verbose=False):
'''
self: filIN, wmod, hdr, w, cdelt, pc, cd, Ndim, Nx, Ny, Nw, im, wvl
'''
## INPUTS
self.filIN = filIN
self.wmod = wmod
self.verbose = verbose
## Read image/cube
if filIN is not None:
ds = read_fits(filIN)
self.hdr = ds.header
self.im = ds.data
self.wvl = ds.wave
else:
self.hdr = header
self.im = image
self.wvl = wave
if self.im is not None:
self.Ndim = self.im.ndim
if self.Ndim==3:
self.Nw, self.Ny, self.Nx = self.im.shape
## Nw=1 patch
if self.im.shape[0]==1:
self.Ndim = 2
elif self.Ndim==2:
self.Ny, self.Nx = self.im.shape
self.Nw = None
if self.hdr is not None:
hdr = self.hdr.copy()
ws = fixwcs(header=hdr, mode='red_dim')
self.hdred = ws.header # reduced header
self.w = ws.wcs
pcdelt = get_pc(wcs=ws.wcs)
self.cdelt = pcdelt.cdelt
self.pc = pcdelt.pc
self.cd = pcdelt.cd
if verbose==True:
print('<improve> file: ', filIN)
print('Raw size (pix): {} * {}'.format(self.Nx, self.Ny))
def reinit(self, filIN=None, header=None, image=None, wave=None,
wmod=0, verbose=False):
'''
Update init variables
'''
## INPUTS
self.filIN = filIN
self.wmod = wmod
self.verbose = verbose
## Read image/cube
if filIN is not None:
ds = read_fits(filIN)
self.hdr = ds.header
self.im = ds.data
self.wvl = ds.wave
else:
self.hdr = header
self.im = image
self.wvl = wave
self.Ndim = self.im.ndim
self.hdr['NAXIS'] = self.Ndim
if self.Ndim==3:
self.Nw, self.Ny, self.Nx = self.im.shape
## Nw=1 patch
if self.im.shape[0]==1:
self.Ndim = 2
del self.hdr['NAXIS3']
else:
self.hdr['NAXIS3'] = self.Nw
elif self.Ndim==2:
self.Ny, self.Nx = self.im.shape
self.Nw = None
self.hdr['NAXIS2'] = self.Ny
self.hdr['NAXIS1'] = self.Nx
hdr = self.hdr.copy()
ws = fixwcs(header=hdr, mode='red_dim')
self.hdred = ws.header # reduced header
self.w = ws.wcs
pcdelt = get_pc(wcs=ws.wcs)
self.cdelt = pcdelt.cdelt
self.pc = pcdelt.pc
self.cd = pcdelt.cd
if verbose==True:
print('<improve> file: ', filIN)
print('Image size (pix): {} * {}'.format(self.Nx, self.Ny))
def uncert(self, filOUT=None, filUNC=None, filWGT=None, wfac=1.,
BG_image=None, BG_weight=None, zerovalue=np.nan):
'''
Estimate uncertainties from the background map
        The resulting error map is either uniform or weighted by the weight map.
------ INPUT ------
filOUT output uncertainty map (FITS)
filUNC input uncertainty map (FITS)
filWGT input weight map (FITS)
wfac multiplication factor for filWGT (Default: 1)
BG_image background image array used to generate unc map
BG_weight background weight array
zerovalue value used to replace zero value (Default: NaN)
------ OUTPUT ------
unc estimated unc map
'''
if filUNC is not None:
unc = read_fits(filUNC).data
else:
if BG_image is not None:
im = BG_image
Ny, Nx = BG_image.shape
else:
im = self.im
Ny = self.Ny
Nx = self.Nx
Nw = self.Nw
## sigma: std dev of (weighted) flux distribution of bg region
if BG_weight is not None:
if self.Ndim==3:
sigma = np.nanstd(im * BG_weight, axis=(1,2))
elif self.Ndim==2:
sigma = np.nanstd(im * BG_weight)
else:
if self.Ndim==3:
sigma = np.nanstd(im, axis=(1,2))
elif self.Ndim==2:
sigma = np.nanstd(im)
## wgt: weight map
if filWGT is not None:
wgt = read_fits(filWGT).data * wfac
else:
wgt = np.ones(self.im.shape) * wfac
## unc: weighted rms = root of var/wgt
if self.Ndim==3:
unc = []
for w in range(Nw):
                unc.append(np.sqrt(1./wgt[w,:,:]) * sigma[w])
unc = np.array(unc)
elif self.Ndim==2:
unc = np.sqrt(1./wgt) * sigma
## Replace zero values
unc[unc==0] = zerovalue
self.unc = unc
if filOUT is not None:
write_fits(filOUT, self.hdr, unc, self.wvl, self.wmod)
return unc
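    ## Usage sketch (illustrative; filenames are placeholders): build a uniform
    ## uncertainty map from the pixel scatter of a source-free background region.
    ##
    ##     bg = improve('background_cube')
    ##     obs = improve('observed_cube', verbose=True)
    ##     unc = obs.uncert(filOUT='observed_cube_unc', BG_image=bg.im)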
def rand_norm(self, filUNC=None, unc=None, sigma=1., mu=0.):
'''
Add random N(0,1) noise
'''
if filUNC is not None:
unc = read_fits(filUNC).data
if unc is not None:
## unc should have the same dimension with im
theta = np.random.normal(mu, sigma, self.im.shape)
self.im += theta * unc
return self.im
def rand_splitnorm(self, filUNC=None, unc=None, sigma=1., mu=0.):
'''
Add random SN(0,lam,lam*tau) noise
------ INPUT ------
filUNC 2 FITS files for unc of left & right sides
unc 2 uncertainty ndarrays
------ OUTPUT ------
'''
if filUNC is not None:
unc = []
for f in filUNC:
unc.append(read_fits(f).data)
if unc is not None:
## unc[i] should have the same dimension with self.im
tau = unc[1]/unc[0]
peak = 1/(1+tau)
theta = np.random.normal(mu, sigma, self.im.shape) # ~N(0,1)
flag = np.random.random(self.im.shape) # ~U(0,1)
if self.Ndim==2:
for x in range(self.Nx):
for y in range(self.Ny):
if flag[y,x]<peak[y,x]:
self.im[y,x] += -abs(theta[y,x]) * unc[0][y,x]
else:
self.im[y,x] += abs(theta[y,x]) * unc[1][y,x]
elif self.Ndim==3:
for x in range(self.Nx):
for y in range(self.Ny):
for k in range(self.Nw):
if flag[k,y,x]<peak[k,y,x]:
self.im[k,y,x] += -abs(
theta[k,y,x]) * unc[0][k,y,x]
else:
self.im[k,y,x] += abs(
theta[k,y,x]) * unc[1][k,y,x]
return self.im
def rand_pointing(self, sigma=0, header=None, fill='med',
xscale=1, yscale=1, swarp=False, tmpdir=None):
'''
Add pointing uncertainty to WCS
------ INPUT ------
sigma pointing accuracy (arcsec)
header baseline
fill fill value of no data regions after shift
'med': axis median (default)
'avg': axis average
'near': nearest non-NaN value on the same axis
float: constant
xscale,yscale regrouped super pixel size
swarp use SWarp to perform position shifts
Default: False (not support supix)
------ OUTPUT ------
'''
if sigma>=0:
sigma /= 3600.
d_ro = abs(np.random.normal(0., sigma)) # N(0,sigma)
d_phi = np.random.random() *2. * np.pi # U(0,2*pi)
# d_ro, d_phi = 0.0002, 4.5
# print('d_ro,d_phi = ', d_ro,d_phi)
## New header/WCS
if header is None:
header = self.hdr
wcs = fixwcs(header=header, mode='red_dim').wcs
Nx = header['NAXIS1']
Ny = header['NAXIS2']
newheader = header.copy()
newheader['CRVAL1'] += d_ro * np.cos(d_phi)
newheader['CRVAL2'] += d_ro * np.sin(d_phi)
newcs = fixwcs(header=newheader, mode='red_dim').wcs
## Convert world increment to pix increment
pix = wcs.all_world2pix(newheader['CRVAL1'], newheader['CRVAL2'], 1)
d_x = pix[0] - header['CRPIX1']
d_y = pix[1] - header['CRPIX2']
# print('Near CRPIXn increments: ', d_x, d_y)
# val1 = np.array(newcs.all_pix2world(0.5, 0.5, 1))
# d_x, d_y = wcs.all_world2pix(val1[np.newaxis,:], 1)[0] - 0.5
# print('Near (1,1) increments: ', d_x, d_y)
oldimage = self.im
## Resampling
if swarp:
## Set path of tmp files (SWarp use only)
if tmpdir is None:
path_tmp = os.getcwd()+'/tmp_swp/'
else:
path_tmp = tmpdir
if not os.path.exists(path_tmp):
os.makedirs(path_tmp)
                ## Works but can be risky since iswarp.combine includes rand_pointing...
write_fits(path_tmp+'tmp_rand_shift',
newheader, self.im, self.wvl)
swp = iswarp(refheader=self.hdr, tmpdir=path_tmp)
rep = swp.combine(path_tmp+'tmp_rand_shift',
combtype='avg', keepedge=True)
self.im = rep.data
else:
if self.Ndim==3:
Nxs = math.ceil(Nx/xscale)
cube_supx = np.zeros((self.Nw,Ny,Nxs))
frac2 = d_x / xscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for xs in range(Nxs):
if frac2>=0:
x0 = sup2pix(0, xscale, Npix=Nx, origin=0)
else:
x0 = sup2pix(Nxs-1, xscale, Npix=Nx, origin=0)
if fill=='med':
fill_value = np.nanmedian(self.im,axis=2)
elif fill=='avg':
fill_value = np.nanmean(self.im,axis=2)
elif fill=='near':
fill_value = np.nanmean(self.im[:,:,x0[0]:x0[-1]+1],axis=2)
else:
fill_value = fill
if frac2>=0:
if xs>=f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (f2+frac1) * np.nanmean(self.im[:,:,x1[0]:x1[-1]+1],axis=2)
if xs>f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (frac2-f2) * np.nanmean(self.im[:,:,x2[0]:x2[-1]+1],axis=2)
else:
cube_supx[:,:,xs] += (frac2-f2) * fill_value
else:
cube_supx[:,:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
else:
if xs<=Nxs+f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (frac2-f2) * np.nanmean(self.im[:,:,x2[0]:x2[-1]+1],axis=2)
if xs<Nxs+f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,:,xs] += (f2+frac1) * np.nanmean(self.im[:,:,x1[0]:x1[-1]+1],axis=2)
else:
cube_supx[:,:,xs] += (f2+frac1) * fill_value
else:
cube_supx[:,:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
Nys = math.ceil(Ny/yscale)
supcube = np.zeros((self.Nw,Nys,Nxs))
frac2 = d_y / yscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for ys in range(Nys):
if frac2>=0:
y0 = sup2pix(0, yscale, Npix=Ny, origin=0)
else:
y0 = sup2pix(Nys-1, yscale, Npix=Ny, origin=0)
if fill=='med':
fill_value = np.nanmedian(cube_supx,axis=1)
elif fill=='avg':
fill_value = np.nanmean(cube_supx,axis=1)
elif fill=='near':
fill_value = np.nanmean(cube_supx[:,y0[0]:y0[-1]+1,:],axis=1)
else:
fill_value = fill
if frac2>=0:
if ys>=f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (f2+frac1) * np.nanmean(cube_supx[:,y1[0]:y1[-1]+1,:],axis=1)
if ys>f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (frac2-f2) * np.nanmean(cube_supx[:,y2[0]:y2[-1]+1,:],axis=1)
else:
supcube[:,ys,:] += (frac2-f2) * fill_value
else:
supcube[:,ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
else:
if ys<=Nys+f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[:,ys,:] += (frac2-f2) * np.nanmean(cube_supx[:,y2[0]:y2[-1]+1,:],axis=1)
if ys<Nys+f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
                                    supcube[:,ys,:] += (f2+frac1) * np.nanmean(cube_supx[:,y1[0]:y1[-1]+1,:],axis=1)
else:
supcube[:,ys,:] += (f2+frac1) * fill_value
else:
supcube[:,ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
for x in range(Nx):
for y in range(Ny):
xs = pix2sup(x, xscale, origin=0)
ys = pix2sup(y, yscale, origin=0)
self.im[:,y,x] = supcube[:,ys,xs]
elif self.Ndim==2:
Nxs = math.ceil(Nx/xscale)
cube_supx = np.zeros((Ny,Nxs))
frac2 = d_x / xscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for xs in range(Nxs):
if frac2>=0:
x0 = sup2pix(0, xscale, Npix=Nx, origin=0)
else:
x0 = sup2pix(Nxs-1, xscale, Npix=Nx, origin=0)
if fill=='med':
fill_value = np.nanmedian(self.im,axis=1)
elif fill=='avg':
fill_value = np.nanmean(self.im,axis=1)
elif fill=='near':
fill_value = np.nanmean(self.im[:,x0[0]:x0[-1]+1],axis=1)
else:
fill_value = fill
if frac2>=0:
if xs>=f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (f2+frac1) * np.nanmean(self.im[:,x1[0]:x1[-1]+1],axis=1)
if xs>f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (frac2-f2) * np.nanmean(self.im[:,x2[0]:x2[-1]+1],axis=1)
else:
cube_supx[:,xs] += (frac2-f2) * fill_value
else:
cube_supx[:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
else:
if xs<=Nxs+f2:
x2 = sup2pix(xs-f2-1, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (frac2-f2) * np.nanmean(self.im[:,x2[0]:x2[-1]+1],axis=1)
if xs<Nxs+f2:
x1 = sup2pix(xs-f2, xscale, Npix=Nx, origin=0)
cube_supx[:,xs] += (f2+frac1) * np.nanmean(self.im[:,x1[0]:x1[-1]+1],axis=1)
else:
cube_supx[:,xs] += (f2+frac1) * fill_value
else:
cube_supx[:,xs] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super x = {}'.format(xs))
Nys = math.ceil(Ny/yscale)
supcube = np.zeros((Nys,Nxs))
frac2 = d_y / yscale
f2 = math.floor(frac2)
frac1 = 1 - frac2
for ys in range(Nys):
if frac2>=0:
y0 = sup2pix(0, yscale, Npix=Ny, origin=0)
else:
y0 = sup2pix(Nys-1, yscale, Npix=Ny, origin=0)
if fill=='med':
fill_value = np.nanmedian(cube_supx,axis=0)
elif fill=='avg':
fill_value = np.nanmean(cube_supx,axis=0)
elif fill=='near':
fill_value = np.nanmean(cube_supx[y0[0]:y0[-1]+1,:],axis=0)
else:
fill_value = fill
if frac2>=0:
if ys>=f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
supcube[ys,:] += (f2+frac1) * np.nanmean(cube_supx[y1[0]:y1[-1]+1,:],axis=0)
if ys>f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[ys,:] += (frac2-f2) * np.nanmean(cube_supx[y2[0]:y2[-1]+1,:],axis=0)
else:
supcube[ys,:] += (frac2-f2) * fill_value
else:
supcube[ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
else:
if ys<=Nys+f2:
y2 = sup2pix(ys-f2-1, yscale, Npix=Ny, origin=0)
supcube[ys,:] += (frac2-f2) * np.nanmean(cube_supx[y2[0]:y2[-1]+1,:],axis=0)
if ys<Nys+f2:
y1 = sup2pix(ys-f2, yscale, Npix=Ny, origin=0)
                                    supcube[ys,:] += (f2+frac1) * np.nanmean(cube_supx[y1[0]:y1[-1]+1,:],axis=0)
else:
supcube[ys,:] += (f2+frac1) * fill_value
else:
supcube[ys,:] += fill_value
# if self.verbose:
# warnings.warn('Zero appears at super y = {}'.format(ys))
for x in range(Nx):
for y in range(Ny):
xs = pix2sup(x, xscale, origin=0)
ys = pix2sup(y, yscale, origin=0)
self.im[y,x] = supcube[ys,xs]
## Original NaN mask
mask_nan = np.isnan(oldimage)
self.im[mask_nan] = np.nan
## Recover new NaN pixels with zeros
mask_recover = np.logical_and(np.isnan(self.im), ~mask_nan)
self.im[mask_recover] = 0
return self.im
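    ## Usage sketch (illustrative; filenames and values are placeholders):
    ## propagate a 1-arcsec pointing accuracy by Monte-Carlo, perturbing the
    ## cube before any further processing.
    ##
    ##     for j in range(100):
    ##         mc = improve('observed_cube')
    ##         mc.rand_norm('observed_cube_unc')        # add measurement noise
    ##         mc.rand_pointing(sigma=1., fill='near')  # add pointing error
    ##         ## ...reproject/extract and store the j-th realisation here...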
def slice(self, filSL, postfix='', ext=''):
## 3D cube slicing
slist = []
if self.Ndim==3:
# hdr = self.hdr.copy()
# for kw in self.hdr.keys():
# if '3' in kw:
# del hdr[kw]
# hdr['NAXIS'] = 2
for k in range(self.Nw):
## output filename list
f = filSL+'_'+'0'*(4-len(str(k)))+str(k)+postfix
slist.append(f+ext)
write_fits(f, self.hdred, self.im[k,:,:]) # gauss_noise inclu
elif self.Ndim==2:
f = filSL+'_0000'+postfix
slist.append(f+ext)
write_fits(f, self.hdred, self.im) # gauss_noise inclu
if self.verbose==True:
print('Input file is a 2D image which cannot be sliced! ')
print('Rewritten with only random noise added (if provided).')
return slist
def slice_inv_sq(self, filSL, postfix=''):
## Inversed square cube slicing
inv_sq = 1./self.im**2
slist = []
if self.Ndim==3:
# hdr = self.hdr.copy()
# for kw in self.hdr.keys():
# if '3' in kw:
# del hdr[kw]
# hdr['NAXIS'] = 2
for k in range(self.Nw):
## output filename list
f = filSL+'_'+'0'*(4-len(str(k)))+str(k)+postfix
slist.append(f)
write_fits(f, self.hdred, inv_sq[k,:,:]) # gauss_noise inclu
elif self.Ndim==2:
f = filSL+'_0000'+postfix
slist.append(f)
write_fits(f, self.hdred, inv_sq) # gauss_noise inclu
return slist
def crop(self, filOUT=None,
sizpix=None, cenpix=None, sizval=None, cenval=None):
'''
If pix and val co-exist, pix will be taken.
------ INPUT ------
filOUT output file
sizpix crop size in pix (dx, dy)
cenpix crop center in pix (x, y)
sizval crop size in deg (dRA, dDEC) -> (dx, dy)
cenval crop center in deg (RA, DEC) -> (x, y)
------ OUTPUT ------
self.im cropped image array
'''
oldimage = self.im
hdr = self.hdr
## Crop center
##-------------
if cenpix is None:
if cenval is None:
raise ValueError('Crop center unavailable! ')
else:
## Convert coord
try:
cenpix = np.array(self.w.all_world2pix(cenval[0], cenval[1], 1))
except wcs.wcs.NoConvergence as e:
cenpix = e.best_solution
print("Best solution:\n{0}".format(e.best_solution))
print("Achieved accuracy:\n{0}".format(e.accuracy))
print("Number of iterations:\n{0}".format(e.niter))
else:
cenval = self.w.all_pix2world(np.array([cenpix]), 1)[0]
if not (0<cenpix[0]-0.5<self.Nx and 0<cenpix[1]-0.5<self.Ny):
            raise ValueError('Crop centre is outside the image border! ')
## Crop size
##-----------
if sizpix is None:
if sizval is None:
raise ValueError('Crop size unavailable! ')
else:
## CDELTn needed (Physical increment at the reference pixel)
sizpix = np.array(sizval) / abs(self.cdelt)
sizpix = np.array([math.floor(n) for n in sizpix])
else:
sizval = np.array(sizpix) * abs(self.cdelt)
if self.verbose==True:
print('----------')
print("Crop centre (RA, DEC): [{:.8}, {:.8}]".format(*cenval))
print("Crop size (dRA, dDEC): [{}, {}]\n".format(*sizval))
print("Crop centre (x, y): [{}, {}]".format(*cenpix))
print("Crop size (dx, dy): [{}, {}]".format(*sizpix))
print('----------')
## Lowerleft origin
##------------------
xmin = math.floor(cenpix[0] - sizpix[0]/2.)
ymin = math.floor(cenpix[1] - sizpix[1]/2.)
xmax = xmin + sizpix[0]
ymax = ymin + sizpix[1]
if not (xmin>=0 and xmax<=self.Nx and ymin>=0 and ymax<=self.Ny):
            raise ValueError('Crop region exceeds the image border! ')
## OUTPUTS
##---------
## New image
if self.Ndim==3:
newimage = oldimage[:, ymin:ymax, xmin:xmax] # gauss_noise inclu
## recover 3D non-reduced header
# hdr = read_fits(self.filIN).header
elif self.Ndim==2:
newimage = oldimage[ymin:ymax, xmin:xmax] # gauss_noise inclu
## Modify header
##---------------
hdr['CRPIX1'] = math.floor(sizpix[0]/2. + 0.5)
hdr['CRPIX2'] = math.floor(sizpix[1]/2. + 0.5)
hdr['CRVAL1'] = cenval[0]
hdr['CRVAL2'] = cenval[1]
self.hdr = hdr
self.im = newimage
## Write cropped image/cube
if filOUT is not None:
# comment = "[ICROP]ped at centre: [{:.8}, {:.8}]. ".format(*cenval)
# comment = "with size [{}, {}] (pix).".format(*sizpix)
write_fits(filOUT, self.hdr, self.im, self.wvl, self.wmod)
## Update self variables
self.reinit(header=self.hdr, image=self.im, wave=self.wvl,
wmod=self.wmod, verbose=self.verbose)
return self.im
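    ## Usage sketch (illustrative; coordinates and filenames are placeholders):
    ## crop a 2-arcmin box around given world coordinates.
    ##
    ##     im = improve('mosaic')
    ##     im.crop(filOUT='mosaic_crop',
    ##             sizval=(2./60, 2./60),       # (dRA, dDEC) in degrees
    ##             cenval=(83.8221, -5.3911))   # (RA, DEC) in degrees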
def rebin(self, pixscale=None, total=False, extrapol=False, filOUT=None):
'''
Shrinking (box averaging) or expanding (bilinear interpolation) astro images
        The new and old grids are aligned at the zero point.
[REF] IDL lib frebin/hrebin
https://idlastro.gsfc.nasa.gov/ftp/pro/astrom/hrebin.pro
https://github.com/wlandsman/IDLAstro/blob/master/pro/frebin.pro
------ INPUT ------
pixscale output pixel scale in arcsec/pixel
scalar - square pixel
tuple - same Ndim with image
total Default: False
True - sum the non-NaN pixels
False - mean
extrapol Default: False
True - value weighted by non NaN fractions
False - NaN if any fraction is NaN
filOUT output file
------ OUTPUT ------
newimage rebinned image array
'''
oldimage = self.im
hdr = self.hdr
oldheader = hdr.copy()
oldw = self.w
# cd = w.pixel_scale_matrix
oldcd = self.cd
oldcdelt = self.cdelt
oldNx = self.Nx
oldNy = self.Ny
if pixscale is not None:
pixscale = listize(pixscale)
if len(pixscale)==1:
pixscale.extend(pixscale)
else:
warnings.warn('Non-square pixels present as square on DS9. '
'WCS will not work either.')
## convert arcsec to degree
cdelt = np.array(pixscale) / 3600.
## Expansion (>1) or contraction (<1) in X/Y
xratio = cdelt[0] / abs(oldcdelt[0])
yratio = cdelt[1] / abs(oldcdelt[1])
else:
pixscale = listize(abs(oldcdelt) * 3600.)
xratio = 1.
yratio = 1.
if self.verbose==True:
print('----------')
print('The actual map size is {} * {}'.format(self.Nx, self.Ny))
print('The actual pixel scale is {} * {} arcsec'.format(*pixscale))
print('----------')
raise InputError('<improve.rebin>',
'No pixscale, nothing has been done!')
## Modify header
##---------------
## Fix CRVALn
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
hdr['CRPIX1'] = (crpix1 - 0.5) / xratio + 0.5
hdr['CRPIX2'] = (crpix2 - 0.5) / yratio + 0.5
cd = oldcd * [xratio,yratio]
hdr['CD1_1'] = cd[0][0]
hdr['CD2_1'] = cd[1][0]
hdr['CD1_2'] = cd[0][1]
hdr['CD2_2'] = cd[1][1]
for kw in oldheader.keys():
if 'PC' in kw:
del hdr[kw]
if 'CDELT' in kw:
del hdr[kw]
# lam = yratio/xratio
# pix_ratio = xratio*yratio
Nx = math.ceil(oldNx / xratio)
Ny = math.ceil(oldNy / yratio)
# Nx = int(oldNx/xratio + 0.5)
# Ny = int(oldNy/yratio + 0.5)
## Rebin
##-------
'''
## Ref: poppy(v0.3.4).utils.krebin
## Klaus P's fastrebin from web
sh = shape[0],a.shape[0]//shape[0],shape[1],a.shape[1]//shape[1]
return a.reshape(sh).sum(-1).sum(1)
'''
if self.Ndim==3:
image_newx = np.zeros((self.Nw,oldNy,Nx))
newimage = np.zeros((self.Nw,Ny,Nx))
nanbox = np.zeros((self.Nw,Ny,Nx))
elif self.Ndim==2:
image_newx = np.zeros((oldNy,Nx))
newimage = np.zeros((Ny,Nx))
nanbox = np.zeros((Ny,Nx))
## istart/old1, istop/old2, rstart/new1, rstop/new2 are old grid indices
if not extrapol:
## Sample x axis
##---------------
for x in range(Nx):
rstart = x * xratio # float
istart = int(rstart) # int
frac1 = rstart - istart
rstop = rstart + xratio # float
if int(rstop)<oldNx:
## Full covered new pixels
istop = int(rstop) # int
frac2 = 1. - (rstop - istop)
else:
## Upper edge (value 0 for uncovered frac: frac2)
istop = oldNx - 1 # int
frac2 = 0
if istart==istop:
## Shrinking case with old pix containing whole new pix (box averaging)
if self.Ndim==3:
image_newx[:,:,x] = (1.-frac1-frac2) * oldimage[:,:,istart]
elif self.Ndim==2:
image_newx[:,x] = (1.-frac1-frac2) * oldimage[:,istart]
else:
## Other cases (bilinear interpolation)
if self.Ndim==3:
edges = frac1*oldimage[:,:,istart] + frac2*oldimage[:,:,istop]
image_newx[:,:,x] = np.sum(oldimage[:,:,istart:istop+1],axis=2) - edges
elif self.Ndim==2:
edges = frac1*oldimage[:,istart] + frac2*oldimage[:,istop]
image_newx[:,x] = np.sum(oldimage[:,istart:istop+1],axis=1) - edges
## Sample y axis
##---------------
for y in range(Ny):
rstart = y * yratio # float
istart = int(rstart) # int
frac1 = rstart - istart
rstop = rstart + yratio # float
if int(rstop)<oldNy:
## Full covered new pixels
istop = int(rstop) # int
frac2 = 1. - (rstop - istop)
else:
## Upper edge (value 0 for uncovered frac: frac2)
istop = oldNy - 1 # int
frac2 = 0
if istart==istop:
## Shrinking case with old pix containing whole new pix (box averaging)
if self.Ndim==3:
newimage[:,y,:] = (1.-frac1-frac2) * image_newx[:,istart,:]
elif self.Ndim==2:
newimage[y,:] = (1.-frac1-frac2) * image_newx[istart,:]
else:
## Other cases (bilinear interpolation)
if self.Ndim==3:
edges = frac1*image_newx[:,istart,:] + frac2*image_newx[:,istop,:]
newimage[:,y,:] = np.sum(image_newx[:,istart:istop+1,:],axis=1) - edges
elif self.Ndim==2:
edges = frac1*image_newx[istart,:] + frac2*image_newx[istop,:]
newimage[y,:] = np.sum(image_newx[istart:istop+1,:],axis=0) - edges
if not total:
newimage = newimage / (xratio*yratio)
else:
## Sample y axis
##---------------
for y in range(Ny):
rstart = y * yratio # float
istart = int(rstart) # int
frac1 = rstart - istart
rstop = rstart + yratio # float
if int(rstop)<oldNy:
## Full covered new pixels
istop = int(rstop) # int
frac2 = 1. - (rstop - istop)
else:
## Upper edge (value 0 for uncovered frac: frac2)
istop = oldNy - 1 # int
frac2 = (rstop - istop) - 1.
## Sample x axis
##---------------
for x in range(Nx):
new1 = x * xratio # float
old1 = int(new1) # int
f1 = new1 - old1
new2 = new1 + xratio # float
if int(new2)<oldNx:
## Full covered new pixels
old2 = int(new2) # int
f2 = 1. - (new2 - old2)
else:
## Upper edge (value 0 for uncovered frac: f2)
old2 = oldNx - 1 # int
f2 = (new2 - old2) - 1. # out frac
## For each pixel (x,y) in new grid,
## find NaNs in old grid and
## recalculate nanbox[w,y,x] taking into account fractions
for j in range(istop+1-istart):
for i in range(old2+1-old1):
## old y grid
if j==0:
ybox = 1.-frac1
elif j==istop-istart:
if int(rstop)<oldNy:
ybox = 1.-frac2
else:
ybox = rstop-istop-1.
else:
ybox = 1.
## old x grid
if i==0:
xbox = 1.-f1
elif i==old2-old1:
if int(new2)<oldNx:
xbox = 1.-f2
else:
xbox = f2
else:
xbox = 1.
## old 2D grid
if self.Ndim==3:
for w in range(self.Nw):
if ~np.isnan(oldimage[w,istart+j,old1+i]):
newimage[w,y,x] += oldimage[w,istart+j,old1+i] * ybox * xbox
nanbox[w,y,x] += ybox * xbox
elif self.Ndim==2:
if ~np.isnan(oldimage[istart+j,old1+i]):
newimage[y,x] += oldimage[istart+j,old1+i] * ybox * xbox
nanbox[y,x] += ybox * xbox
if not total:
newimage = np.where(nanbox==0, np.nan, newimage/nanbox)
newimage[newimage==0] = np.nan
if filOUT is not None:
write_fits(filOUT, hdr, newimage, self.wvl, self.wmod)
## Update self variables
self.reinit(header=hdr, image=newimage, wave=self.wvl,
wmod=self.wmod, verbose=self.verbose)
if self.verbose==True:
print('----------')
print('The actual map size is {} * {}'.format(self.Nx, self.Ny))
print('The actual pixel scale is {} * {} arcsec'.format(*pixscale))
print('\n <improve> Rebin [done]')
print('----------')
return newimage
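    ## Usage sketch (illustrative; the filename is a placeholder): resample a
    ## map onto 6-arcsec square pixels, conserving surface brightness
    ## (total=False). The reference pixel is rescaled following the code above:
    ## CRPIXn_new = (CRPIXn_old - 0.5)/ratio + 0.5, with ratio = new/old pixel scale.
    ##
    ##     im = improve('input_map', verbose=True)
    ##     rebinned = im.rebin(pixscale=6., total=False, extrapol=True,
    ##                         filOUT='input_map_rebin6')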
def groupixel(self, xscale=1, yscale=1, filOUT=None):
'''
Group a cluster of pixels (with their mean value)
------ INPUT ------
xscale,yscale grouped super pixel size
'''
Nxs = math.ceil(self.Nx/xscale)
Nys = math.ceil(self.Ny/yscale)
if self.Ndim==3:
## Super pixels
image_sup = np.zeros((self.Nw,Nys,Nxs))
for xs in range(Nxs):
xarr = sup2pix(xs, xscale, Npix=self.Nx, origin=0)
for ys in range(Nys):
yarr = sup2pix(ys, yscale, Npix=self.Ny, origin=0)
im = self.im[:,yarr[0]:yarr[-1]+1,xarr[0]:xarr[-1]+1]
image_sup[:,ys,xs] += np.nanmean(im,axis=(1,2))
## Grouped pixels
image_grp = np.zeros((self.Nw,self.Ny,self.Nx))
for x in range(self.Nx):
for y in range(self.Ny):
xs = pix2sup(x, xscale, origin=0)
ys = pix2sup(y, yscale, origin=0)
image_grp[:,y,x] = image_sup[:,ys,xs]
elif self.Ndim==2:
## Super pixels
image_sup = np.zeros((Nys,Nxs))
for xs in range(Nxs):
xarr = sup2pix(xs, xscale, Npix=self.Nx, origin=0)
for ys in range(Nys):
yarr = sup2pix(ys, yscale, Npix=self.Ny, origin=0)
im = self.im[yarr[0]:yarr[-1]+1,xarr[0]:xarr[-1]+1]
image_sup[ys,xs] += np.nanmean(im)
## Grouped pixels
image_grp = np.zeros((self.Ny,self.Nx))
for x in range(self.Nx):
for y in range(self.Ny):
xs = pix2sup(x, xscale, origin=0)
ys = pix2sup(y, yscale, origin=0)
image_grp[y,x] = image_sup[ys,xs]
if filOUT is not None:
write_fits(filOUT, self.hdr, image_grp, self.wvl, self.wmod)
## Update self variables
self.reinit(header=self.hdr, image=image_grp, wave=self.wvl,
wmod=self.wmod, verbose=self.verbose)
return image_grp
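    ## Usage sketch (illustrative; the filename is a placeholder): replace every
    ## 3x3 block of pixels by its mean value while keeping the original grid.
    ##
    ##     im = improve('input_map')
    ##     grouped = im.groupixel(xscale=3, yscale=3, filOUT='input_map_grp3')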
def smooth(self, smooth=1, wgrid=None, wstart=None, filOUT=None):
'''
Smooth wavelengths
If shift, not compatible with unc which needs MC propagation
------ INPUT ------
smooth smooth wavelength grid by linear interpolation (Default: 1)
wgrid external wavelength grid (Default: None)
wstart shift wavelength grid to wstart origin (Default: None)
'''
## Replace wavelength grid
if wgrid is not None:
wvl = wgrid
Nw0 = len(wgrid)
else:
wvl = self.wvl
Nw0 = self.Nw
## Wavelength shift (within original w range)
if wstart is not None:
wshift = wstart - wvl[0]
else:
wshift = 0
newave = []
nan_left = 0
nan_right = 0
for k in range(Nw0):
if k%smooth==0:
w = wvl[k]+wshift
## New wgrid should be within the interpolation range (or give NaNs)
newave.append(w)
if w<self.wvl[0]:
nan_left+=1
elif w>self.wvl[-1]:
nan_right+=1
newave = np.array(newave)
Nw = len(newave)
newcube = np.empty([Nw,self.Ny,self.Nx])
for x in range(self.Nx):
for y in range(self.Ny):
f = interp1d(self.wvl, self.im[:,y,x], kind='linear')
newcube[nan_left:Nw-nan_right,y,x] = f(newave[nan_left:Nw-nan_right])
newcube[:nan_left,y,x] = np.nan
newcube[Nw-nan_right:,y,x] = np.nan
if filOUT is not None:
write_fits(filOUT, self.hdr, newcube, newave, self.wmod)
## Update self variables
self.reinit(header=self.hdr, image=newcube, wave=newave,
wmod=self.wmod, verbose=self.verbose)
return newcube
def artifact(self, filUNC=None, BG_image=None, zerovalue=np.nan,
wmin=None, wmax=None, lim_unc=1.e2, fltr_pn=None, cmin=5,
filOUT=None):
'''
Remove spectral artifacts (Interpolate aberrant wavelengths)
        Anomaly if:
abs(v - v_med) / unc > lim_unc
------ INPUT ------
filUNC input uncertainty map (FITS)
filOUT output spectral map (FITS)
BG_image background image used to generate unc map
zerovalue value used to replace zero value (Default:NaN)
wmin,wmax wavelength range to clean (float)
lim_unc uncertainty dependant factor limit (positive float)
        fltr_pn positive/negative filter (Default: None)
                'p' - clean only positive aberrant
                'n' - clean only negative aberrant
cmin minimum neighboring artifacts
------ OUTPUT ------
im cleaned spectral map
'''
im = self.im
wvl = self.wvl
unc = self.uncert(filUNC=filUNC,BG_image=BG_image,zerovalue=zerovalue)
if wmin is None:
wmin = wvl[0]
iwi = listize(wvl).index(wvl[closest(wvl,wmin)])
if wmax is None:
wmax = wvl[-1]
iws = listize(wvl).index(wvl[closest(wvl,wmax)])
if lim_unc<0:
raise ValueError('lim_unc must be positive!')
## Scan every pixel/spectrum at each wavelength
for w in trange(self.Nw, leave=False,
desc='<improve> Cleaning spectral artifacts'):
if w>=iwi and w<=iws:
pix_x = []
pix_y = []
for y in range(self.Ny):
for x in range(self.Nx):
v_med = np.median(im[iwi:iws,y,x])
dv = (im[w,y,x] - v_med) / unc[w,y,x]
if fltr_pn is None or fltr_pn=='p':
if dv > lim_unc:
pix_x.append(x)
pix_y.append(y)
if fltr_pn is None or fltr_pn=='n':
if dv < -lim_unc:
pix_x.append(x)
pix_y.append(y)
pix_x = np.array(pix_x)
pix_y = np.array(pix_y)
## If the neighbors share the feature, not an artifact
for ix, x in enumerate(pix_x):
counter = 0
for iy, y in enumerate(pix_y):
if abs(y-pix_y[ix]+pix_x[iy]-x)<=2:
counter += 1
## max(counter) == 12
if counter<cmin:
if w==0:
im[w,pix_y[ix],x] = im[w+1,pix_y[ix],x]
elif w==self.Nw-1:
im[w,pix_y[ix],x] = im[w-1,pix_y[ix],x]
else:
im[w,pix_y[ix],x] = (im[w-1,pix_y[ix],x]+im[w+1,pix_y[ix],x])/2
# im[w,pix_y[ix],x] = np.median(im[iwi:iws,pix_y[ix],x])
if filOUT is not None:
comment = "Cleaned by <improve.artifact>"
write_fits(filOUT, self.hdr, im, wvl,
COMMENT=comment)
return im
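    ## Usage sketch (illustrative; filenames and limits are placeholders): flag
    ## positive spikes deviating from the per-pixel median by more than
    ## lim_unc*unc between 5 and 20 microns and interpolate over them.
    ##
    ##     cube = improve('spectral_cube')
    ##     cleaned = cube.artifact(BG_image=bg_array, wmin=5., wmax=20.,
    ##                             lim_unc=1.e2, fltr_pn='p', cmin=5,
    ##                             filOUT='spectral_cube_clean')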
def mask(self):
'''
'''
pass
class Jy_per_pix_to_MJy_per_sr(improve):
'''
Convert image unit from Jy/pix to MJy/sr
------ INPUT ------
filIN input FITS file
filOUT output FITS file
------ OUTPUT ------
'''
def __init__(self, filIN, filOUT=None, wmod=0, verbose=False):
super().__init__(filIN, wmod=wmod, verbose=verbose)
## gmean( Jy/MJy / sr/pix )
ufactor = np.sqrt(np.prod(1.e-6/pix2sr(1., self.cdelt)))
self.im = self.im * ufactor
self.hdr['BUNIT'] = 'MJy/sr'
if filOUT is not None:
write_fits(filOUT, self.hdr, self.im, self.wvl, self.wmod)
def header(self):
return self.hdr
def image(self):
return self.im
def wave(self):
return self.wvl
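## Illustrative check of the conversion factor above (values are hypothetical):
## for square pixels of side 1.2 arcsec the solid angle per pixel is
##
##     pixel_sr = (1.2/3600 * np.pi/180)**2    # ~3.38e-11 sr
##     ufactor  = 1.e-6 / pixel_sr             # ~2.95e4  Jy/pix -> MJy/sr
##
## which is the factor applied to self.im (via the geometric mean of the two
## pixel axes when they differ).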
class iuncert(improve):
'''
Generate uncertainties
------ INPUT ------
filIN input map (FITS)
    filOUT output uncertainty map (FITS)
filWGT input weight map (FITS)
wfac multiplication factor for filWGT (Default: 1)
BG_image background image array
BG_weight background weight array
zerovalue value to replace zeros (Default: NaN)
------ OUTPUT ------
'''
def __init__(self, filIN, filOUT=None, filWGT=None, wfac=1,
BG_image=None, BG_weight=None, zerovalue=np.nan):
super().__init__(filIN, wmod=0, verbose=False)
self.uncert(filOUT=filOUT, BG_image=BG_image, zerovalue=zerovalue,
filWGT=filWGT, wfac=wfac, BG_weight=BG_weight)
def unc(self):
return self.unc
class islice(improve):
'''
Slice a cube
------ INPUT ------
filIN input FITS file
    filSL output path+basename
filUNC input uncertainty FITS
dist unc pdf
slicetype Default: None
None - normal slices
'inv_sq' - inversed square slices
postfix postfix of output slice names
------ OUTPUT ------
self: slist, path_tmp,
(filIN, wmod, hdr, w, cdelt, pc, cd, Ndim, Nx, Ny, Nw, im, wvl)
'''
def __init__(self, filIN, filSL=None, filUNC=None, dist=None,
slicetype=None, postfix=''):
super().__init__(filIN)
if filSL is None:
path_tmp = os.getcwd()+'/tmp_proc/'
if not os.path.exists(path_tmp):
os.makedirs(path_tmp)
filSL = path_tmp+'slice'
self.filSL = filSL
if dist=='norm':
self.rand_norm(filUNC)
elif dist=='splitnorm':
self.rand_splitnorm(filUNC)
if slicetype is None:
self.slist = self.slice(filSL, postfix) # gauss_noise inclu
elif slicetype=='inv_sq':
self.slist = self.slice_inv_sq(filSL, postfix)
def image(self):
return self.im
def wave(self):
return self.wvl
def filenames(self):
return self.slist
def clean(self, filIN=None):
if filIN is not None:
fclean(filIN)
else:
fclean(self.filSL+'*')
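## Usage sketch (illustrative; paths are placeholders): slice a cube into
## per-wavelength 2D frames, adding Gaussian noise drawn from an uncertainty
## cube, then remove the temporary slices.
##
##     sl = islice('spectral_cube', filSL='./tmp/slice',
##                 filUNC='spectral_cube_unc', dist='norm')
##     frames = sl.filenames()   # list of single-frame FITS basenames
##     sl.clean()                # delete ./tmp/slice*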
class icrop(improve):
'''
CROP 2D image or 3D cube
'''
def __init__(self, filIN, filOUT=None,
sizpix=None, cenpix=None, sizval=None, cenval=None,
filUNC=None, dist=None, wmod=0, verbose=False):
## slicrop: slice
super().__init__(filIN, wmod=wmod, verbose=verbose)
if dist=='norm':
self.rand_norm(filUNC)
elif dist=='splitnorm':
self.rand_splitnorm(filUNC)
im_crop = self.crop(filOUT=filOUT, sizpix=sizpix, cenpix=cenpix,
sizval=sizval, cenval=cenval) # gauss_noise inclu
def header(self):
return self.hdr
def image(self):
return self.im
def wave(self):
return self.wvl
class irebin(improve):
'''
REBIN 2D image or 3D cube
'''
def __init__(self, filIN, filOUT=None,
pixscale=None, total=False, extrapol=False,
filUNC=None, dist=None, wmod=0, verbose=False):
super().__init__(filIN, wmod=wmod, verbose=verbose)
if dist=='norm':
self.rand_norm(filUNC)
elif dist=='splitnorm':
self.rand_splitnorm(filUNC)
im_rebin = self.rebin(filOUT=filOUT, pixscale=pixscale,
total=total, extrapol=extrapol)
def header(self):
return self.hdr
def image(self):
return self.im
def wave(self):
return self.wvl
class igroupixel(improve):
'''
GROUP a cluster of PIXELs (with their mean value)
'''
def __init__(self, filIN, filOUT=None,
xscale=1, yscale=1,
wmod=0, verbose=False):
super().__init__(filIN, wmod=wmod, verbose=verbose)
im_grp = self.groupixel(xscale=xscale, yscale=yscale, filOUT=filOUT)
def header(self):
return self.hdr
def image(self):
return self.im
def wave(self):
return self.wvl
class ismooth(improve):
'''
SMOOTH wavelengths
'''
def __init__(self, filIN, filOUT=None,
smooth=1, wgrid=None, wstart=None,
wmod=0, verbose=False):
super().__init__(filIN, wmod=wmod, verbose=verbose)
im_smooth = self.smooth(smooth=smooth, filOUT=filOUT,
wgrid=wgrid, wstart=wstart)
def header(self):
return self.hdr
def image(self):
return self.im
def wave(self):
return self.wvl
class imontage(improve):
'''
2D image or 3D cube montage toolkit
Based on reproject v0.7.1 or later
------ INPUT ------
reproject_function resampling algorithms
'interp': fastest (Default)
'exact': slowest
'adaptive': DeForest2004
tmpdir tmp file path
verbose (Default: False)
------ OUTPUT ------
'''
def __init__(self, reproject_function='interp',
tmpdir=None, verbose=False):
'''
self: func, path_tmp, verbose
'''
if reproject_function=='interp':
self.func = reproject_interp
elif reproject_function=='exact':
self.func = reproject_exact
elif reproject_function=='adaptive':
self.func = reproject_adaptive
else:
raise InputError('<imontage>',
'Unknown reprojection !')
## Set path of tmp files
if tmpdir is None:
path_tmp = os.getcwd()+'/tmp_mtg/'
else:
path_tmp = tmpdir
if not os.path.exists(path_tmp):
os.makedirs(path_tmp)
self.path_tmp = path_tmp
## Verbose
if verbose==False:
devnull = open(os.devnull, 'w')
else:
devnull = None
self.verbose = verbose
self.devnull = devnull
def reproject(self, flist, refheader, filOUT=None,
dist=None, sig_pt=0, fill_pt='near') :
'''
Reproject 2D image or 3D cube
------ INPUT ------
flist FITS files to reproject
refheader reprojection header
filOUT output FITS file
dist uncertainty distribution
'norm' - N(0,1)
'splitnorm' - SN(0,lam,lam*tau)
sig_pt pointing accuracy in arcsec (Default: 0)
fill_pt fill value of no data regions after shift
'med': axis median
'avg': axis average
'near': nearest non-NaN value on the same axis (default)
float: constant
------ OUTPUT ------
images reprojected images
'''
flist = listize(flist)
# if refheader is None:
# raise InputError('<imontage>','No reprojection header!')
images = []
for f in flist:
super().__init__(f)
## Set tmp and out
filename = os.path.basename(f)
if filOUT is None:
filOUT = self.path_tmp+filename+'_rep'
## Uncertainty propagation
if dist=='norm':
self.rand_norm(f+'_unc')
elif dist=='splitnorm':
self.rand_splitnorm([f+'_unc_N', f+'_unc_P'])
self.rand_pointing(sig_pt, fill=fill_pt)
write_fits(filOUT, self.hdr, self.im, self.wvl, wmod=0)
## Do reprojection
##-----------------
im = self.func(filOUT+fitsext, refheader)[0]
images.append(im)
comment = "Reprojected by <imontage>. "
write_fits(filOUT, refheader, im, self.wvl, wmod=0,
COMMENT=comment)
return images
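    ## Usage sketch (illustrative; filenames are placeholders): reproject a cube
    ## onto the grid of a reference map.
    ##
    ##     mtg = imontage(reproject_function='interp', tmpdir='./tmp_mtg/')
    ##     refhdr = read_fits('reference_map').header
    ##     rep_im = mtg.reproject('spectral_cube', refheader=refhdr)[0]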
def reproject_mc(self, filIN, refheader, filOUT=None,
dist=None, sig_pt=0, fill_pt='near', Nmc=0):
'''
Generate Monte-Carlo uncertainties for reprojected input file
'''
ds = type('', (), {})()
hyperim = [] # [j,(w,)y,x]
for j in trange(Nmc+1, leave=False,
desc='<imontage> Reprojection [MC]'):
if j==0:
im0 = self.reproject(filIN, refheader, filOUT)[0]
else:
hyperim.append( self.reproject(filIN, refheader, filOUT+'_'+str(j),
dist, sig_pt, fill_pt)[0] )
im0 = np.array(im0)
hyperim = np.array(hyperim)
unc = np.nanstd(hyperim, axis=0)
comment = "Reprojected by <imontage>. "
if Nmc>0:
write_fits(filOUT+'_unc', refheader, unc, self.wvl,
COMMENT=comment)
ds.data = im0
ds.unc = unc
ds.hyperdata = hyperim
return ds
def coadd(self, flist, refheader, filOUT=None,
dist=None, sig_pt=0, fill_pt='near', Nmc=0):
'''
Reproject and coadd
'''
flist = listize(flist)
ds = type('', (), {})()
comment = "Created by <imontage>"
slist = [] # slist[j,if,iw]
for j in trange(Nmc+1, leave=False,
desc='<imontage> Slicing... [MC]'):
sl = [] # sl[f,w]
for f in flist:
super().__init__(f)
## Set tmp and out
filename = os.path.basename(f)
if filOUT is None:
filOUT = self.path_tmp+filename+'_rep'
coadd_tmp = self.path_tmp+filename+'/'
if not os.path.exists(coadd_tmp):
os.makedirs(coadd_tmp)
if j==0:
sl.append(self.slice(coadd_tmp+'slice', ext=fitsext))
else:
if dist=='norm':
self.rand_norm(f+'_unc')
elif dist=='splitnorm':
self.rand_splitnorm([f+'_unc_N', f+'_unc_P'])
self.rand_pointing(sig_pt, fill=fill_pt)
sl.append(self.slice(coadd_tmp+'slice',
postfix='_'+str(j), ext=fitsext))
slist.append(np.array(sl))
slist = np.array(slist)
Nw = self.Nw
superim = []
for j in trange(Nmc+1, leave=False,
desc='<imontage> Coadding... [MC]'):
if j==0:
im = []
if self.Ndim==3:
for iw in range(Nw):
im.append(reproject_and_coadd(slist[j,:,iw], refheader,
reproject_function=self.func)[0])
elif self.Ndim==2:
im = reproject_and_coadd(slist[j,:,0], refheader,
reproject_function=self.func)[0]
im = np.array(im)
write_fits(filOUT, refheader, im, self.wvl, wmod=0,
COMMENT=comment)
else:
hyperim = []
for iw in range(Nw):
hyperim.append(reproject_and_coadd(slist[j,:,iw], refheader,
reproject_function=self.func)[0])
superim.append(np.array(hyperim))
write_fits(filOUT+'_'+str(j), refheader, hyperim, self.wvl, wmod=0,
COMMENT=comment)
superim = np.array(superim)
unc = np.nanstd(superim, axis=0)
if Nmc>0:
write_fits(filOUT+'_unc', refheader, unc, self.wvl, wmod=0,
COMMENT=comment)
ds.wave = self.wvl
ds.data = im
ds.unc = unc
ds.hyperdata = superim
return ds
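    ## Usage sketch (illustrative; filenames are placeholders, and 'cube_a_unc'/
    ## 'cube_b_unc' uncertainty files are assumed to exist for dist='norm'):
    ## reproject and coadd two overlapping cubes with 20 Monte-Carlo iterations.
    ##
    ##     mtg = imontage(reproject_function='interp')
    ##     ds = mtg.coadd(['cube_a', 'cube_b'], refheader=refhdr,
    ##                    filOUT='coadded_cube', dist='norm', Nmc=20)
    ##     coadded, coadded_unc = ds.data, ds.unc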
def clean(self, filIN=None):
if filIN is not None:
fclean(filIN)
else:
fclean(self.path_tmp)
class iswarp(improve):
'''
SWarp drop-in image montage toolkit
i means <improve>-based
Alternative to its fully Python-based twin <imontage>
------ INPUT ------
flist ref FITS files used to make header (footprint)
    refheader reference header whose scaling matrix is adopted if it co-exists with flist
center center of output image frame
None - contains all input fields
str('hh:mm:ss,dd:mm:ss') - manual input RA,DEC
pixscale pixel scale (arcsec)
                None - median pixel scale of the input frames at the center
float() - in arcseconds
verbose default: True
tmpdir tmp file path
------ OUTPUT ------
coadd.fits
By default, SWarp reprojects all input to a WCS with diag CD matrix.
"To implement the unusual output features required,
one must write a coadd.head ASCII file that contains
a custom anisotropic scaling matrix. "
'''
def __init__(self, flist=None, refheader=None,
center=None, pixscale=None,
verbose=False, tmpdir=None):
'''
self: path_tmp, verbose
(filIN, wmod, hdr, w, Ndim, Nx, Ny, Nw, im, wvl)
'''
if verbose==False:
devnull = open(os.devnull, 'w')
else:
devnull = None
self.verbose = verbose
self.devnull = devnull
## Set path of tmp files
if tmpdir is None:
path_tmp = os.getcwd()+'/tmp_swp/'
else:
path_tmp = tmpdir
if not os.path.exists(path_tmp):
os.makedirs(path_tmp)
self.path_tmp = path_tmp
fclean(path_tmp+'coadd*') # remove previous coadd.fits/.head
if flist is None:
if refheader is None:
raise InputError('<iswarp>','No input!')
## Define coadd frame via refheader
else:
if center is not None or pixscale is not None:
                    warnings.warn('The keywords center and pixscale are ignored since refheader is provided. ')
self.refheader = refheader
else:
## Input files in list object
flist = listize(flist)
## Images
image_files = ' '
list_ref = []
for i in range(len(flist)):
image = read_fits(flist[i]).data
hdr = fixwcs(flist[i]+fitsext).header
file_ref = flist[i]
if image.ndim==3:
## Extract 1st frame of the cube
file_ref = path_tmp+os.path.basename(flist[i])+'_ref'
write_fits(file_ref, hdr, image[0])
image_files += file_ref+fitsext+' ' # SWarp input str
list_ref.append(file_ref+fitsext) # reproject input
## Define coadd frame
##--------------------
## via SWarp without refheader (isotropic scaling matrix)
## Create config file
SP.call('swarp -d > swarp.cfg',
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
## Config param list
swarp_opt = ' -c swarp.cfg -SUBTRACT_BACK N -IMAGEOUT_NAME coadd.ref.fits '
if center is not None:
swarp_opt += ' -CENTER_TYPE MANUAL -CENTER '+center
if pixscale is not None:
swarp_opt += ' -PIXELSCALE_TYPE MANUAL -PIXEL_SCALE '+str(pixscale)
if verbose=='quiet':
swarp_opt += ' -VERBOSE_TYPE QUIET '
## Run SWarp
SP.call('swarp '+swarp_opt+image_files,
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
self.refheader = read_fits(path_tmp+'coadd.ref').header
## via reproject with refheader (custom anisotropic scaling matrix)
if refheader is not None:
if center is not None or pixscale is not None:
                warnings.warn('The keywords center and pixscale are ignored since refheader is provided. ')
super().__init__(path_tmp+'coadd.ref')
pix_old = [[0, 0]]
pix_old.append([0, self.Ny])
pix_old.append([self.Nx, 0])
pix_old.append([self.Nx, self.Ny])
world_arr = self.w.all_pix2world(np.array(pix_old), 1)
w = fixwcs(header=refheader).wcs
try:
pix_new = w.all_world2pix(world_arr, 1)
except wcs.wcs.NoConvergence as e:
pix_new = e.best_solution
print("Best solution:\n{0}".format(e.best_solution))
print("Achieved accuracy:\n{0}".format(e.accuracy))
print("Number of iterations:\n{0}".format(e.niter))
xmin = min(pix_new[:,0])
xmax = max(pix_new[:,0])
ymin = min(pix_new[:,1])
ymax = max(pix_new[:,1])
refheader['CRPIX1'] += -xmin
refheader['CRPIX2'] += -ymin
refheader['NAXIS1'] = math.ceil(xmax - xmin)
refheader['NAXIS2'] = math.ceil(ymax - ymin)
self.refheader = refheader
# fclean(path_tmp+'*ref.fits')
def footprint(self, filOUT=None):
'''
Save reprojection footprint
'''
if filOUT is None:
filOUT = self.path_tmp+'footprint'
Nx = self.refheader['NAXIS1']
Ny = self.refheader['NAXIS2']
im_fp = np.ones((Ny, Nx))
comment = "<iswarp> footprint"
write_fits(filOUT, self.refheader, im_fp, COMMENT=comment)
return im_fp
def combine(self, flist, combtype='med', keepedge=False, cropedge=False,
dist=None, sig_pt=0, fill_pt='near', filOUT=None, tmpdir=None):
'''
SWarp combine (coadding/reprojection)
------ INPUT ------
flist input FITS files should have the same wvl
combtype combine type
'med' - median (default)
'avg' - average
'wgt_avg' - inverse variance weighted average
keepedge default: False
cropedge crop the NaN edge of the frame (Default: False)
dist add uncertainties (filename+'_unc.fits' needed)
sig_pt pointing accuracy in arcsec (Default: 0)
fill_pt fill value of no data regions after shift
'med': axis median
'avg': axis average
'near': nearest non-NaN value on the same axis (default)
float: constant
filOUT output FITS file
------ OUTPUT ------
coadd.head key for SWarp (inherit self.refheader)
'''
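        ## Hypothetical call (illustrative only; 'map1'/'map2' are placeholders and the
        ## corresponding *_unc files must exist when dist or combtype='wgt_avg' is used):
        ##   ds = swp.combine(['map1', 'map2'], combtype='wgt_avg', keepedge=True,
        ##                    dist='norm', sig_pt=0.5, filOUT='mosaic')
        ##   ## ds.data: combined image; ds.wave: wavelength grid (None if 2D input)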
ds = type('', (), {})()
verbose = self.verbose
devnull = self.devnull
path_tmp = self.path_tmp
if tmpdir is None:
path_comb = path_tmp+'comb/'
else:
path_comb = tmpdir
if not os.path.exists(path_comb):
os.makedirs(path_comb)
## Input files in list format
flist = listize(flist)
## Header
##--------
with open(path_tmp+'coadd.head', 'w') as f:
f.write(str(self.refheader))
## Images and weights
##--------------------
Nf = len(flist)
imshape = read_fits(flist[0]).data.shape
if len(imshape)==3:
Nw = imshape[0]
wvl = read_fits(flist[0]).wave
else:
Nw = 1
wvl = None
## Build imlist & wgtlist (size=Nf)
imlist = []
wgtlist = []
for i in range(Nf):
filename = os.path.basename(flist[i])
## Set slice file
file_slice = path_comb+filename
## Slice
super().__init__(flist[i])
if dist=='norm':
self.rand_norm(flist[i]+'_unc')
elif dist=='splitnorm':
self.rand_splitnorm([flist[i]+'_unc_N', flist[i]+'_unc_P'])
self.rand_pointing(sig_pt, fill=fill_pt)
imlist.append(self.slice(file_slice, ''))
if combtype=='wgt_avg':
super().__init__(flist[i]+'_unc')
wgtlist.append(self.slice_inv_sq(file_slice, '.weight'))
## Build image_files & weight_files (size=Nw)
image_files = [' ']*Nw
weight_files = [' ']*Nw
## Let's SWarp
##-------------
hyperimage = []
for k in trange(Nw, leave=False,
desc='<iswarp> Combining (by wvl)'):
for i in range(Nf):
image_files[k] += imlist[i][k]+fitsext+' '
if combtype=='wgt_avg':
weight_files[k] += wgtlist[i][k]+fitsext+' '
## Create config file
SP.call('swarp -d > swarp.cfg',
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
## Config param list
swarp_opt = ' -c swarp.cfg -SUBTRACT_BACK N '
if combtype=='med':
pass
elif combtype=='avg':
swarp_opt += ' -COMBINE_TYPE AVERAGE '
elif combtype=='wgt_avg':
swarp_opt += ' -COMBINE_TYPE WEIGHTED '
swarp_opt += ' -WEIGHT_TYPE MAP_WEIGHT '
swarp_opt += ' -WEIGHT_SUFFIX .weight.fits '
                # swarp_opt += ' -WEIGHT_IMAGE '+weight_files[k] # did not work
if verbose=='quiet':
swarp_opt += ' -VERBOSE_TYPE QUIET '
## Run SWarp
SP.call('swarp '+swarp_opt+' -RESAMPLING_TYPE LANCZOS3 '+image_files[k],
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
coadd = read_fits(path_tmp+'coadd')
newimage = coadd.data
newheader = coadd.header
## Add back in the edges because LANCZOS3 kills the edges
## Do it in steps of less and less precision
if keepedge==True:
oldweight = read_fits(path_tmp+'coadd.weight').data
if np.sum(oldweight==0)!=0:
SP.call('swarp '+swarp_opt+' -RESAMPLING_TYPE LANCZOS2 '+image_files[k],
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
edgeimage = read_fits(path_tmp+'coadd').data
newweight = read_fits(path_tmp+'coadd.weight').data
edgeidx = np.logical_and(oldweight==0, newweight!=0)
if edgeidx.any():
newimage[edgeidx] = edgeimage[edgeidx]
oldweight = read_fits(path_tmp+'coadd.weight').data
if np.sum(oldweight==0)!=0:
SP.call('swarp '+swarp_opt+' -RESAMPLING_TYPE BILINEAR '+image_files[k],
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
edgeimage = read_fits(path_tmp+'coadd').data
newweight = read_fits(path_tmp+'coadd.weight').data
edgeidx = np.logical_and(oldweight==0, newweight!=0)
if edgeidx.any():
newimage[edgeidx] = edgeimage[edgeidx]
oldweight = read_fits(path_tmp+'coadd.weight').data
if np.sum(oldweight==0)!=0:
SP.call('swarp '+swarp_opt+' -RESAMPLING_TYPE NEAREST '+image_files[k],
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
edgeimage = read_fits(path_tmp+'coadd').data
newweight = read_fits(path_tmp+'coadd.weight').data
edgeidx = np.logical_and(oldweight==0, newweight!=0)
if edgeidx.any():
newimage[edgeidx] = edgeimage[edgeidx]
## Astrometric flux-rescaling based on the local ratio of pixel scale
## Complementary for lack of FITS kw 'FLXSCALE'
## Because SWarp is conserving surface brightness/pixel
oldcdelt = get_pc(wcs=fixwcs(flist[i]+fitsext).wcs).cdelt
newcdelt = get_pc(wcs=fixwcs(path_tmp+'coadd'+fitsext).wcs).cdelt
old_pixel_fov = abs(oldcdelt[0]*oldcdelt[1])
new_pixel_fov = abs(newcdelt[0]*newcdelt[1])
newimage = newimage * old_pixel_fov/new_pixel_fov
newimage[newimage==0] = np.nan
# write_fits(path_comb+'coadd_'+str(k), newheader, newimage)
# tqdm.write(str(old_pixel_fov))
# tqdm.write(str(new_pixel_fov))
# tqdm.write(str(abs(newheader['CD1_1']*newheader['CD2_2'])))
if Nw==1:
hyperimage = newimage
else:
hyperimage.append(newimage)
hyperimage = np.array(hyperimage)
if cropedge:
reframe = improve(header=newheader, image=hyperimage, wave=wvl)
xlist = []
for x in range(reframe.Nx):
if reframe.Ndim==3:
allnan = np.isnan(reframe.im[:,:,x]).all()
elif reframe.Ndim==2:
allnan = np.isnan(reframe.im[:,x]).all()
if not allnan:
xlist.append(x)
ylist = []
for y in range(reframe.Ny):
if reframe.Ndim==3:
allnan = np.isnan(reframe.im[:,y,:]).all()
elif reframe.Ndim==2:
allnan = np.isnan(reframe.im[y,:]).all()
if not allnan:
ylist.append(y)
xmin = min(xlist)
xmax = max(xlist)+1
ymin = min(ylist)
ymax = max(ylist)+1
dx = xmax-xmin
dy = ymax-ymin
x0 = xmin+dx/2
y0 = ymin+dy/2
reframe.crop(filOUT=path_tmp+'coadd.ref',
sizpix=(dx,dy), cenpix=(x0,y0))
newheader = reframe.hdr
hyperimage = reframe.im
cropcenter = (x0,y0)
cropsize = (dx,dy)
else:
cropcenter = None
cropsize = None
if filOUT is not None:
write_fits(filOUT, newheader, hyperimage, wvl)
if tmpdir is None:
fclean(path_comb)
ds.header = newheader
ds.data = hyperimage
ds.wave = wvl
ds.cropcenter = cropcenter
ds.cropsize = cropsize
return ds
def combine_mc(self, filIN, Nmc=0,
combtype='med', keepedge=False, cropedge=False,
dist=None, sig_pt=0, fill_pt='near',
filOUT=None, tmpdir=None):
'''
Generate Monte-Carlo uncertainties for reprojected input file
'''
ds = type('', (), {})()
hyperim = [] # [j,(w,)y,x]
for j in trange(Nmc+1, leave=False,
desc='<iswarp> Reprojection (MC level)'):
if j==0:
comb = self.combine(filIN, filOUT=filOUT, tmpdir=tmpdir,
combtype=combtype, keepedge=keepedge, cropedge=cropedge)
im0 = comb.data
else:
hyperim.append( self.combine(filIN, filOUT=filOUT+'_'+str(j),
tmpdir=tmpdir, combtype=combtype,
keepedge=keepedge, cropedge=cropedge,
dist=dist, sig_pt=sig_pt, fill_pt=fill_pt).data )
im0 = np.array(im0)
        hyperim = np.array(hyperim)
# -*- coding: utf-8 -*-
# @Author: Bao
# @Date: 2021-12-11 08:47:12
# @Last Modified by: dorihp
# @Last Modified time: 2022-01-07 14:19:15
import json
import time
import cv2
import numpy as np
from onvif import ONVIFCamera
class Detector():
def __init__(self, cfg, weights, classes, input_size):
super(Detector, self).__init__()
assert input_size % 32 == 0, "Input size must be a multiple of 32!"
# Init detector and it's parameters
self.net = cv2.dnn_DetectionModel(cfg, weights)
self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
self.input_size = input_size
self.net.setInputSize(input_size, input_size)
self.net.setInputScale(1.0 / 255)
self.net.setInputSwapRB(True)
with open(classes, 'rt') as f:
self.classes = f.read().rstrip('\n').split('\n')
def detect(self, frame, cl_filter):
classes, _, boxes = self.net.detect(frame, confThreshold=0.1, nmsThreshold=0.4)
cen_x = cen_y = False
# print(classes, boxes)
if len(classes):
for _class, box in zip(classes.flatten(), boxes):
# if _class != cl_filter:
# continue
left, top, width, height = box
cen_x = int(left + width / 2)
cen_y = int(top + height / 2)
right = left + width
bottom = top + height
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
cv2.circle(frame, (cen_x, cen_y), radius=0, color=(0, 0, 255), thickness=5)
# break
return frame, classes, boxes
# return frame, cen_x, cen_y
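
# Hypothetical usage sketch for Detector (paths and the class index are placeholders,
# not taken from this repository):
#   det = Detector('yolov4.cfg', 'yolov4.weights', 'coco.names', input_size=416)
#   frame = cv2.imread('sample.jpg')
#   frame, classes, boxes = det.detect(frame, cl_filter=0)
#   cv2.imwrite('annotated.jpg', frame)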
class Undistort():
def __init__(self, params):
super(Undistort, self).__init__()
        parameters = np.load(params)
"""Tests for the InsertAlongAxis op."""
import numpy as np
import theano
from theano import config
from theano import tensor
from pylearn2.utils.insert_along_axis import (
insert_columns, insert_rows, InsertAlongAxis
)
def test_insert_along_axis():
x = tensor.matrix()
y = insert_columns(x, 10, range(0, 10, 2))
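    # insert_columns should place the 5 columns of x at the even column indices of a
    # (batch, 10) output; the remaining columns are presumably zero-filled (only the
    # copied columns are asserted below).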
f = theano.function([x], y)
x_ = np.random.normal(size=(7, 5)).astype(config.floatX)
f_val = f(x_)
assert f_val.shape == (7, 10)
assert np.all(f_val[:, range(0, 10, 2)] == x_)
assert f_val.dtype == x_.dtype
y = insert_rows(x, 10, range(0, 10, 2))
f = theano.function([x], y)
    x_ = np.random.normal(size=(5, 6)).astype(config.floatX)
# Python 2 backwards compatibility overhead START
"""
DEPRECATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat
DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED!
"""
import math as mt
import numpy as np
from . import dev_tool, data_storage
def mayou_score(
mc_data, real_data, features=None, old_mc_weights=1, clf="xgb", splits=2, n_folds=10
):
"""An experimental score using a "loss" function for data-similarity"""
import raredecay.analysis.ml_analysis as ml_ana
from raredecay.globals_ import out
features = dev_tool.entries_to_str(features)
clf = dev_tool.entries_to_str(clf)
# initialize variables
output = {}
score_mc_vs_mcr = []
score_mcr_vs_real = []
# splits *= 2 # because every split is done with fold 0 and 1 (<- 2 *)
# loop over number of splits, split the mc data
mc_data.make_folds(n_folds)
real_data.make_folds(n_folds)
# mc reweighted vs mc
for fold in range(n_folds):
mc_data_train, mc_data_test = mc_data.get_fold(fold)
# TODO: no real folds? It is better to test on full data always?
# mc_data_train, mc_data_test = real_data.get_fold(fold)
for split in range(splits * 2): # because two possibilities per split
if split % 2 == 0:
mc_data_train.make_folds(2)
mc_normal, mc_reweighted = mc_data_train.get_fold(split % 2)
mc_normal.set_weights(old_mc_weights)
score_mc_vs_mcr.append(
ml_ana.classify(
original_data=mc_normal,
target_data=mc_reweighted,
features=features,
validation=[mc_data_test, real_data],
clf=clf,
plot_importance=1,
# TODO: no weights ratio? (roc auc)
weights_ratio=0,
)[1]
)
out.add_output(
[
"mayou_score mc vs mc reweighted test on mc vs real score: ",
score_mc_vs_mcr,
"\nMean: ",
np.mean(score_mc_vs_mcr),
" +-",
np.std(score_mc_vs_mcr) / mt.sqrt(len(score_mc_vs_mcr) - 1),
],
subtitle="Mayou score",
to_end=True,
)
output["mc_distance"] = np.mean(score_mc_vs_mcr)
# mc_reweighted vs real
for fold in range(n_folds):
real_train, real_test = real_data.get_fold(fold)
mc_train, mc_test = mc_data.get_fold(fold)
mc_test.set_weights(old_mc_weights)
score_mcr_vs_real.append(
ml_ana.classify(
original_data=mc_train,
target_data=real_train,
features=features,
validation=[mc_test, real_test],
clf=clf,
plot_importance=1,
# TODO: no weights ratio? (roc auc)
weights_ratio=0,
)[1]
)
out.add_output(
[
"mayou_score real vs mc reweighted test on mc vs real score: ",
score_mcr_vs_real,
"\nMean: ",
np.mean(score_mcr_vs_real),
" +-",
np.std(score_mcr_vs_real) / mt.sqrt(len(score_mcr_vs_real) - 1),
],
to_end=True,
)
output["real_distance"] = np.mean(score_mcr_vs_real)
def train_similar(
mc_data,
real_data,
features=None,
n_checks=10,
n_folds=10,
clf="xgb",
test_max=True,
test_shuffle=True,
test_mc=False,
old_mc_weights=1,
test_predictions=False,
clf_pred="rdf",
):
"""Score for reweighting. Train clf on mc reweighted/real, test on real; minimize score.
Enter two datasets and evaluate the score described below. Return a
dictionary containing the different scores. The test_predictions is
another scoring, which is built upon the train_similar method.
**Scoring method description**
**Idea**:
A clf is trained on the reweighted mc as well as on the real data of a
certain decay. Therefore, the classifier learns to distinguish between
Monte-Carlo data and real data. Then we let the classifier predict some
    real data (an unbiased test set) and see how many events it is able to classify
    as real. The lower the score, the fewer differences it was able to learn
    from the training data, hence the more similar the training data and the
    better the reweighting.
    **Advantages**: It is quite difficult to cheat on this method. Most of all
    it is robust to single high-weight events (which mcreweighted_as_real is
    not) and, in general, seems to be the best scoring so far.
    **Disadvantages**: If you insert a Gaussian shape at 1.0 as mc and a Gaussian
    shape at 1.1 as real, the score will be bad (around 0.33). So far, this was
    only observed for "artificial" distributions (even though, of course, we
    do not know whether it partly affects real distributions as well).
**Output explanation**
The return is a dictionary containing several values. Of course, only the
values, which are set to be evaluated, are contained. The keys are:
- '**score**' : The average of all train_similar scores (as we use KFolding,
there will be n_folds scores). *The* score.
- '**score_std**' : The std of a single score, just for curiosity
- '**score_max**' : The (average of all) "maximum" score. Actually the
train_similar score but
with mc instead of *reweighted* mc. Should be higher then the
reweighted score.
- '**score_max_std**' : The std of a single score, just for curiosity
- '**score_pred**' : The score of the test_predictions method.
- '**score_mc_pred**' : The score of the test_predictions method but on the
predictions of the mc instead of the *reweighted* mc.
Parameters
----------
mc_data : |hepds_type|
The reweighted Monte-Carlo data, assuming the new weights are applied
already.
real_data : |hepds_type|
The real data
n_checks : int >= 1
Number of checks to perform. Has to be <= n_folds
n_folds : int > 1
Number of folds the data will be split into
clf : str
The name of a classifier to be used in
:py:func:`~raredecay.analysis.ml_analysis.classify`.
test_max : boolean
If true, test for the "maximum value" by training also on mc/real
(instead of *reweighted* mc/real)
and test on real. The score for only mc should be higher than for
reweighted mc/real. It *should* most probably but does not have to
be!
old_mc_weights : array-like or 1
If *test_max* is True, the weights for mc before reweighting will be
taken to be *old_mc_weights*, the weights the mc distribution had
before the reweighting. The default is 1.
test_predictions : boolean
        If true, try to distinguish the predictions. Advanced feature whose
        interpretation is not yet well understood. Gives a very high ROC somehow.
clf_pred : str
The classifier to be used to distinguish the predictions. Required for
the *test_predictions*.
Return
------
out : dict
        A dictionary containing the different scores. See the description above.
"""
import raredecay.analysis.ml_analysis as ml_ana
from raredecay.globals_ import out
features = dev_tool.entries_to_str(features)
clf = dev_tool.entries_to_str(clf)
clf_pred = dev_tool.entries_to_str(clf_pred)
# initialize variables
assert (
1 <= n_checks <= n_folds and n_folds > 1
), "wrong n_checks/n_folds. Check the docs"
assert isinstance(mc_data, data_storage.HEPDataStorage), (
"mc_data wrong type:" + str(type(mc_data)) + ", has to be HEPDataStorage"
)
assert isinstance(real_data, data_storage.HEPDataStorage), (
"real_data wrong type:" + str(type(real_data)) + ", has to be HEPDataStorage"
)
# assert isinstance(clf, str),\
# "clf has to be a string, the name of a valid classifier. Check the docs!"
output = {}
scores = np.ones(n_checks)
scores_shuffled = np.ones(n_checks)
scores_mc = np.ones(n_checks)
scores_max = np.ones(n_checks) # required due to output of loop
scores_mc_max = np.ones(n_checks)
# scores_weighted = []
scores_max_weighted = []
probas_mc = []
probas_reweighted = []
weights_mc = []
weights_reweighted = []
real_pred = []
real_test_index = []
real_mc_pred = []
# initialize data
tmp_mc_targets = mc_data.get_targets()
mc_data.set_targets(0)
real_data.make_folds(n_folds=n_folds)
if test_mc:
mc_data.make_folds(n_folds=n_folds)
for fold in range(n_checks):
real_train, real_test = real_data.get_fold(fold)
if test_mc:
mc_train, mc_test = mc_data.get_fold(fold)
mc_test.set_targets(0)
else:
mc_train = mc_data.copy_storage()
mc_train.set_targets(0)
real_test.set_targets(1)
real_train.set_targets(1)
tmp_out = ml_ana.classify(
mc_train,
real_train,
validation=real_test,
clf=clf,
plot_title="train on mc reweighted/real, test on real",
weights_ratio=1,
get_predictions=True,
features=features,
plot_importance=1,
importance=1,
)
clf_trained, scores[fold], pred_reweighted = tmp_out
tmp_weights = mc_train.get_weights()
if test_shuffle:
import copy
shuffled_weights = copy.deepcopy(tmp_weights)
shuffled_weights.reindex(np.random.permutation(shuffled_weights.index))
mc_train.set_weights(shuffled_weights)
tmp_out = ml_ana.classify(
mc_train,
real_train,
validation=real_test,
clf=clf,
plot_title="train on mc reweighted/real, test on real",
weights_ratio=1,
get_predictions=True,
features=features,
plot_importance=1,
importance=1,
)
scores_shuffled[fold] = tmp_out[1]
mc_train.set_weights(tmp_weights)
if test_mc:
clf_trained, scores_mc[fold] = ml_ana.classify(
validation=mc_test,
clf=clf_trained,
plot_title="train on mc reweighted/real, test on mc",
weights_ratio=1,
get_predictions=False,
features=features,
plot_importance=1,
importance=1,
)
# del clf_trained, tmp_pred
probas_reweighted.append(pred_reweighted["y_proba"])
weights_reweighted.append(pred_reweighted["weights"])
real_pred.extend(pred_reweighted["y_pred"])
real_test_index.extend(real_test.get_index())
if test_max:
temp_weights = mc_data.get_weights()
mc_data.set_weights(old_mc_weights)
tmp_out = ml_ana.classify(
mc_data,
real_train,
validation=real_test,
plot_title="real/mc NOT reweight trained, validate on real",
weights_ratio=1,
get_predictions=True,
clf=clf,
features=features,
plot_importance=1,
importance=1,
)
clf_trained, scores_max[fold], pred_mc = tmp_out
if test_mc:
clf_trained, scores_mc_max[fold] = ml_ana.classify(
validation=mc_test,
clf=clf_trained,
plot_title="train on mc NOT reweighted/real, test on mc",
weights_ratio=1,
get_predictions=False,
features=features,
plot_importance=1,
importance=1,
)
del clf_trained
# HACK
tmp_pred = pred_mc["y_proba"][:, 1] * pred_mc["weights"]
scores_max_weighted.extend(tmp_pred * (pred_mc["y_true"] * 2 - 1))
# HACK END
mc_data.set_weights(temp_weights)
probas_mc.append(pred_mc["y_proba"])
weights_mc.append(pred_mc["weights"])
real_mc_pred.extend(pred_mc["y_pred"])
output["score"] = np.round(scores.mean(), 4)
output["score_std"] = np.round(scores.std(), 4)
if test_shuffle:
output["score_shuffled"] = np.round(scores_shuffled.mean(), 4)
output["score_shuffled_std"] = np.round(scores_shuffled.std(), 4)
if test_mc:
output["score_mc"] = np.round(scores_mc.mean(), 4)
output["score_mc_std"] = np.round(scores_mc.std(), 4)
out.add_output(
[
"Score train_similar (recall, lower means better): ",
str(output["score"]) + " +- " + str(output["score_std"]),
],
subtitle="Clf trained on real/mc reweight, tested on real",
)
if test_max:
output["score_max"] = np.round(scores_max.mean(), 4)
output["score_max_std"] = np.round(scores_max.std(), 4)
if test_mc:
output["score_mc_max"] = np.round(scores_mc_max.mean(), 4)
output["score_mc_max_std"] = np.round(scores_mc_max.std(), 4)
out.add_output(["No reweighting score: ", round(output["score_max"], 4)])
if test_predictions:
# test on the reweighted/real predictions
real_data.set_targets(targets=real_pred, index=real_test_index)
tmp_, score_pred = ml_ana.classify(
real_data,
target_from_data=True,
clf=clf_pred,
features=features,
plot_title="train on predictions reweighted/real, real as target",
weights_ratio=1,
validation=n_checks,
plot_importance=3,
)
output["score_pred"] = round(score_pred, 4)
if test_predictions and test_max:
# test on the mc/real predictions
real_data.set_targets(targets=real_mc_pred, index=real_test_index)
tmp_, score_mc_pred = ml_ana.classify(
real_data,
target_from_data=True,
clf=clf_pred,
validation=n_checks,
plot_title="mc not rew/real pred, real as target",
weights_ratio=1,
plot_importance=3,
)
output["score_mc_pred"] = np.round(score_mc_pred, 4)
mc_data.set_targets(tmp_mc_targets)
output["similar_dist"] = similar_dist(
predictions=np.concatenate(probas_reweighted)[:, 1],
        weights=np.concatenate(weights_reweighted),
    )
import os
from collections import defaultdict
import numpy
from matplotlib import pyplot as plt
from csep.utils.basic_types import seq_iter, AdaptiveHistogram
from csep.utils.calc import _compute_likelihood, bin1d_vec, _compute_spatial_statistic
from csep.utils.constants import CSEP_MW_BINS, SECONDS_PER_DAY, SECONDS_PER_HOUR, SECONDS_PER_WEEK
from csep.models import EvaluationResult
from csep.core.repositories import FileSystem
from csep.utils.plots import plot_number_test, plot_magnitude_test, plot_likelihood_test, plot_spatial_test, \
plot_cumulative_events_versus_time_dev, plot_magnitude_histogram_dev, plot_distribution_test, plot_probability_test, \
plot_spatial_dataset
from csep.utils.stats import get_quantiles, cumulative_square_diff, sup_dist
# todo: refactor these methods to not perform any filtering of catalogs inside the processing task
class AbstractProcessingTask:
def __init__(self, data=None, name=None, min_mw=2.5, n_cat=None, mws=None):
self.data = data or []
# to-be deprecated
self.mws = mws or [2.5, 3.0, 3.5, 4.0, 4.5]
self.min_mw = min_mw
self.n_cat = n_cat
self.name = name
self.ax = []
self.fnames = []
self.needs_two_passes = False
self.buffer = []
self.region = None
self.buffer_fname = None
self.fhandle = None
self.archive = True
self.version = 1
@staticmethod
def _build_filename(dir, mw, plot_id):
basename = f"{plot_id}_mw_{str(mw).replace('.','p')}".lower()
return os.path.join(dir, basename)
def process(self, data):
raise NotImplementedError('must implement process()!')
def process_again(self, catalog, args=()):
""" This function defaults to pass unless the method needs to read through the data twice. """
pass
def post_process(self, obs, args=None):
"""
Compute evaluation of data stored in self.data.
Args:
obs (csep.Catalog): used to evaluate the forecast
args (tuple): args for this function
Returns:
result (csep.core.evaluations.EvaluationResult):
"""
result = EvaluationResult()
return result
def plot(self, results, plot_dir, show=False):
"""
plots function, typically just a wrapper to function in utils.plotting()
Args:
show (bool): show plot, if plotting multiple, just run on last.
filename (str): where to save the file
plot_args (dict): plotting args to pass to function
Returns:
axes (matplotlib.axes)
"""
raise NotImplementedError('must implement plot()!')
def store_results(self, results, dir):
"""
Saves evaluation results serialized into json format. This format is used to recreate the results class which
can then be plotted if desired. The following directory structure will be created:
| dir
|-- n-test
|---- n-test_mw_2.5.json
|---- n_test_mw_3.0.json
|-- m-test
|---- m_test_mw_2.5.json
|---- m_test_mw_3.0.json
...
The results iterable should only contain results for a single evaluation. Typically they would contain different
minimum magnitudes.
Args:
results (Iterable of EvaluationResult): iterable object containing evaluation results. this could be a list or tuple of lists as well
dir (str): directory to store the testing results. name will be constructed programatically.
Returns:
None
"""
success = False
if self.archive == False:
return
# handle if results is just a single result
if isinstance(results, EvaluationResult):
repo = FileSystem(url=self._build_filename(dir, results.min_mw, results.name) + '.json')
if repo.save(results.to_dict()):
success = True
return success
# or if its an iterable
for idx in seq_iter(results):
# for debugging
if isinstance(results[idx], tuple) or isinstance(results[idx], list):
result = results[idx]
else:
result = [results[idx]]
for r in result:
repo = FileSystem(url=self._build_filename(dir, r.min_mw, r.name) + '.json')
if repo.save(r.to_dict()):
success = True
return success
def store_data(self, dir):
""" Store the intermediate data used to calculate the results for the evaluations. """
raise NotImplementedError
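
# Minimal sketch of how these processing tasks are typically driven (hypothetical
# driver loop; the exact contents of the args tuples vary per task, see each class):
#   task = NumberTest()   # or MagnitudeTest(), LikelihoodAndSpatialTest(), ...
#   for catalog in forecast_catalogs:        # first pass over the synthetic catalogs
#       task.process(catalog)
#   if task.needs_two_passes:
#       for catalog in forecast_catalogs:    # second pass, e.g. pseudo-likelihood tests
#           task.process_again(catalog, args=(time_horizon, n_cat, end_epoch, obs))
#   results = task.post_process(obs, args=(None, time_horizon, end_epoch, n_cat))
#   task.plot(results, plot_dir)
#   task.store_results(results, results_dir)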
class NumberTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mws = [2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0]
def process(self, catalog, filter=False):
if not self.name:
self.name = catalog.name
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
counts.append(cat_filt.event_count)
self.data.append(counts)
def post_process(self, obs, args=None):
# we dont need args for this function
_ = args
results = {}
data = numpy.array(self.data)
for i, mw in enumerate(self.mws):
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
observation_count = obs_filt.event_count
# get delta_1 and delta_2 values
delta_1, delta_2 = get_quantiles(data[:,i], observation_count)
# prepare result
result = EvaluationResult(test_distribution=data[:,i],
name='N-Test',
observed_statistic=observation_count,
quantile=(delta_1, delta_2),
status='Normal',
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
min_mw=mw,
obs_name=obs.name)
results[mw] = result
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
for mw, result in results.items():
# compute bin counts, this one is special because of integer values
td = result.test_distribution
min_bin, max_bin = numpy.min(td), numpy.max(td)
# hard-code some logic for bin size
bins = numpy.arange(min_bin, max_bin)
if len(bins) == 1:
bins = 3
n_test_fname = AbstractProcessingTask._build_filename(plot_dir, mw, 'n_test')
_ = plot_number_test(result, show=show, plot_args={'percentile': 95,
'title': f'Number Test, M{mw}+',
'bins': bins,
'filename': n_test_fname})
self.fnames.append(n_test_fname)
class MagnitudeTest(AbstractProcessingTask):
def __init__(self, mag_bins=None, **kwargs):
super().__init__(**kwargs)
self.mws = [2.5, 3.0, 3.5, 4.0]
self.mag_bins = mag_bins
self.version = 4
def process(self, catalog):
if not self.name:
self.name = catalog.name
# magnitude mag_bins should probably be bound to the region, although we should have a SpaceMagnitudeRegion class
if self.mag_bins is None:
try:
self.mag_bins = catalog.region.mag_bins
except:
self.mag_bins = CSEP_MW_BINS
# optimization idea: always compute this for the lowest magnitude, above this is redundant
mags = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
binned_mags = cat_filt.magnitude_counts(mag_bins=self.mag_bins)
mags.append(binned_mags)
# data shape (n_cat, n_mw, n_mw_bins)
self.data.append(mags)
def post_process(self, obs, args=None):
# we dont need args
_ = args
results = {}
for i, mw in enumerate(self.mws):
test_distribution = []
# get observed magnitude counts
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
if obs_filt.event_count == 0:
print(f"Skipping {mw} in Magnitude test because no observed events.")
continue
obs_histogram = obs_filt.magnitude_counts(mag_bins=self.mag_bins)
n_obs_events = numpy.sum(obs_histogram)
mag_counts_all = numpy.array(self.data)
# get the union histogram, simply the sum over all catalogs, (n_cat, n_mw)
union_histogram = numpy.sum(mag_counts_all[:,i,:], axis=0)
n_union_events = numpy.sum(union_histogram)
union_scale = n_obs_events / n_union_events
scaled_union_histogram = union_histogram * union_scale
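            # Test distribution: for each synthetic catalog, scale its magnitude
            # histogram to the observed event count and accumulate the cumulative
            # squared difference of the log10(counts+1) curves with respect to the
            # scaled union histogram; the same statistic is computed for the
            # observation below.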
for j in range(mag_counts_all.shape[0]):
n_events = numpy.sum(mag_counts_all[j,i,:])
if n_events == 0:
continue
scale = n_obs_events / n_events
catalog_histogram = mag_counts_all[j,i,:] * scale
test_distribution.append(cumulative_square_diff(numpy.log10(catalog_histogram+1), numpy.log10(scaled_union_histogram+1)))
# compute statistic from the observation
obs_d_statistic = cumulative_square_diff(numpy.log10(obs_histogram+1), numpy.log10(scaled_union_histogram+1))
# score evaluation
_, quantile = get_quantiles(test_distribution, obs_d_statistic)
# prepare result
result = EvaluationResult(test_distribution=test_distribution,
name='M-Test',
observed_statistic=obs_d_statistic,
quantile=quantile,
status='Normal',
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
obs_name=obs.name,
sim_name=self.name)
results[mw] = result
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
# get the filename
for mw, result in results.items():
m_test_fname = self._build_filename(plot_dir, mw, 'm-test')
plot_args = {'percentile': 95,
'title': f'Magnitude Test, M{mw}+',
'bins': 'auto',
'filename': m_test_fname}
_ = plot_magnitude_test(result, show=False, plot_args=plot_args)
self.fnames.append(m_test_fname)
def _build_filename(self, dir, mw, plot_id):
try:
mag_dh = self.mag_bins[1] - self.mag_bins[0]
mag_dh_str = f"_dmag{mag_dh:.1f}".replace('.','p').lower()
except:
mag_dh_str = ''
basename = f"{plot_id}_mw_{str(mw).replace('.', 'p')}{mag_dh_str}".lower()
return os.path.join(dir, basename)
class LikelihoodAndSpatialTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.region = None
self.test_distribution_spatial = []
self.test_distribution_likelihood = []
self.cat_id = 0
self.needs_two_passes = True
self.buffer = []
self.fnames = {}
self.fnames['l-test'] = []
self.fnames['s-test'] = []
self.version = 5
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
# compute stuff from data
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_counts = cat_filt.spatial_counts()
counts.append(gridded_counts)
# we want to aggregate the counts in each bin to preserve memory
if len(self.data) == 0:
self.data = numpy.array(counts)
else:
self.data += numpy.array(counts)
def process_again(self, catalog, args=()):
# we dont actually need to do this if we are caching the data
time_horizon, n_cat, end_epoch, obs = args
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
# unfortunately, we need to iterate twice through the catalogs for this, unless we start pre-processing
# everything and storing approximate cell-wise rates
lhs = numpy.zeros(len(self.mws))
lhs_norm = numpy.zeros(len(self.mws))
for i, mw in enumerate(self.mws):
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
n_obs = obs_filt.event_count
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_cat = cat_filt.spatial_counts()
lh, lh_norm = _compute_likelihood(gridded_cat, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
lhs[i] = lh
lhs_norm[i] = lh_norm
self.test_distribution_likelihood.append(lhs)
self.test_distribution_spatial.append(lhs_norm)
def post_process(self, obs, args=None):
cata_iter, time_horizon, end_epoch, n_cat = args
results = {}
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
test_distribution_likelihood = numpy.array(self.test_distribution_likelihood)
# there can be nans in the spatial distribution
test_distribution_spatial = numpy.array(self.test_distribution_spatial)
# prepare results for each mw
for i, mw in enumerate(self.mws):
# get observed likelihood
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
if obs_filt.event_count == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in observed observed_catalog.')
continue
n_obs = obs_filt.get_number_of_events()
gridded_obs = obs_filt.spatial_counts()
obs_lh, obs_lh_norm = _compute_likelihood(gridded_obs, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
# if obs_lh is -numpy.inf, recompute but only for indexes where obs and simulated are non-zero
message = "normal"
if obs_lh == -numpy.inf or obs_lh_norm == -numpy.inf:
idx_good_sim = apprx_rate_density[i,:] != 0
new_gridded_obs = gridded_obs[idx_good_sim]
new_n_obs = numpy.sum(new_gridded_obs)
print(f"Found -inf as the observed likelihood score for M{self.mws[i]}+. "
f"Assuming event(s) occurred in undersampled region of forecast.\n"
f"Recomputing with {new_n_obs} events after removing {n_obs - new_n_obs} events.")
if new_n_obs == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in observed observed_catalog '
f'after correcting for under-sampling in forecast.')
continue
new_ard = apprx_rate_density[i,idx_good_sim]
# we need to use the old n_obs here, because if we normalize the ard to a different value the observed
# statistic will not be computed correctly.
obs_lh, obs_lh_norm = _compute_likelihood(new_gridded_obs, new_ard, expected_cond_count[i], n_obs)
message = "undersampled"
# determine outcome of evaluation, check for infinity
_, quantile_likelihood = get_quantiles(test_distribution_likelihood[:,i], obs_lh)
# build evaluation result
result_likelihood = EvaluationResult(test_distribution=test_distribution_likelihood[:,i],
name='L-Test',
observed_statistic=obs_lh,
quantile=quantile_likelihood,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
# check for nans here
test_distribution_spatial_1d = test_distribution_spatial[:,i]
if numpy.isnan(numpy.sum(test_distribution_spatial_1d)):
test_distribution_spatial_1d = test_distribution_spatial_1d[~numpy.isnan(test_distribution_spatial_1d)]
if n_obs == 0 or numpy.isnan(obs_lh_norm):
message = "not-valid"
quantile_spatial = -1
else:
_, quantile_spatial = get_quantiles(test_distribution_spatial_1d, obs_lh_norm)
result_spatial = EvaluationResult(test_distribution=test_distribution_spatial_1d,
name='S-Test',
observed_statistic=obs_lh_norm,
quantile=quantile_spatial,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
results[mw] = (result_likelihood, result_spatial)
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
for mw, result_tuple in results.items():
# plot likelihood test
l_test_fname = self._build_filename(plot_dir, mw, 'l-test')
plot_args = {'percentile': 95,
'title': f'Pseudo-Likelihood Test, M{mw}+',
'filename': l_test_fname}
_ = plot_likelihood_test(result_tuple[0], axes=None, plot_args=plot_args, show=show)
# we can access this in the main program if needed
# self.ax.append((ax, spatial_ax))
self.fnames['l-test'].append(l_test_fname)
if result_tuple[1].status == 'not-valid':
print(f'Skipping plot for spatial test on {mw}. Test results are not valid, likely because no earthquakes observed in target observed_catalog.')
continue
# plot spatial test
s_test_fname = self._build_filename(plot_dir, mw, 's-test')
plot_args = {'percentile': 95,
'title': f'Spatial Test, M{mw}+',
'filename': s_test_fname}
_ = plot_spatial_test(result_tuple[1], axes=None, plot_args=plot_args, show=False)
self.fnames['s-test'].append(s_test_fname)
class SpatialTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.region = None
self.test_distribution_spatial = []
self.cat_id = 0
self.needs_two_passes = True
self.buffer = []
self.fnames = {}
self.fnames['s-test'] = []
self.version = 5
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
# compute stuff from data
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_counts = cat_filt.spatial_counts()
counts.append(gridded_counts)
# we want to aggregate the counts in each bin to preserve memory
if len(self.data) == 0:
self.data = numpy.array(counts)
else:
self.data += numpy.array(counts)
def process_again(self, catalog, args=()):
# we dont actually need to do this if we are caching the data
time_horizon, n_cat, end_epoch, obs = args
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
# unfortunately, we need to iterate twice through the catalogs for this, unless we start pre-processing
# everything and storing approximate cell-wise rates
lhs = numpy.zeros(len(self.mws))
lhs_norm = numpy.zeros(len(self.mws))
for i, mw in enumerate(self.mws):
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
n_obs = obs_filt.event_count
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_cat = cat_filt.spatial_counts()
lh, lh_norm = _compute_likelihood(gridded_cat, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
lhs[i] = lh
lhs_norm[i] = lh_norm
self.test_distribution_spatial.append(lhs_norm)
def post_process(self, obs, args=None):
cata_iter, time_horizon, end_epoch, n_cat = args
results = {}
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
# there can be nans in the spatial distribution
test_distribution_spatial = numpy.array(self.test_distribution_spatial)
# prepare results for each mw
for i, mw in enumerate(self.mws):
# get observed likelihood
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
if obs_filt.event_count == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in observed observed_catalog.')
continue
n_obs = obs_filt.get_number_of_events()
gridded_obs = obs_filt.spatial_counts()
obs_lh, obs_lh_norm = _compute_likelihood(gridded_obs, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
# if obs_lh is -numpy.inf, recompute but only for indexes where obs and simulated are non-zero
message = "normal"
if obs_lh == -numpy.inf or obs_lh_norm == -numpy.inf:
idx_good_sim = apprx_rate_density[i,:] != 0
new_gridded_obs = gridded_obs[idx_good_sim]
new_n_obs = numpy.sum(new_gridded_obs)
print(f"Found -inf as the observed likelihood score for M{self.mws[i]}+. "
f"Assuming event(s) occurred in undersampled region of forecast.\n"
f"Recomputing with {new_n_obs} events after removing {n_obs - new_n_obs} events.")
if new_n_obs == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in observed observed_catalog '
f'after correcting for under-sampling in forecast.')
continue
new_ard = apprx_rate_density[i,idx_good_sim]
# we need to use the old n_obs here, because if we normalize the ard to a different value the observed
# statistic will not be computed correctly.
obs_lh, obs_lh_norm = _compute_likelihood(new_gridded_obs, new_ard, expected_cond_count[i], n_obs)
message = "undersampled"
# check for nans here
test_distribution_spatial_1d = test_distribution_spatial[:,i]
if numpy.isnan(numpy.sum(test_distribution_spatial_1d)):
test_distribution_spatial_1d = test_distribution_spatial_1d[~numpy.isnan(test_distribution_spatial_1d)]
if n_obs == 0 or numpy.isnan(obs_lh_norm):
message = "not-valid"
quantile_spatial = -1
else:
_, quantile_spatial = get_quantiles(test_distribution_spatial_1d, obs_lh_norm)
result_spatial = EvaluationResult(test_distribution=test_distribution_spatial_1d,
name='S-Test',
observed_statistic=obs_lh_norm,
quantile=quantile_spatial,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
results[mw] = result_spatial
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
for mw, result in results.items():
if result.status == 'not-valid':
print(f'Skipping plot for spatial test on {mw}. Test results are not valid, likely because no earthquakes observed in target observed_catalog.')
continue
# plot spatial test
s_test_fname = self._build_filename(plot_dir, mw, 's-test')
plot_args = {'percentile': 95,
'title': f'Spatial Test, M{mw}+',
'filename': s_test_fname}
_ = plot_spatial_test(result, axes=None, plot_args=plot_args, show=False)
self.fnames['s-test'].append(s_test_fname)
class LikelihoodTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.region = None
self.test_distribution_likelihood = []
self.cat_id = 0
self.needs_two_passes = True
self.buffer = []
self.fnames = {}
self.fnames['l-test'] = []
self.fnames['s-test'] = []
self.version = 5
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
# compute stuff from data
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_counts = cat_filt.spatial_counts()
counts.append(gridded_counts)
# we want to aggregate the counts in each bin to preserve memory
if len(self.data) == 0:
self.data = numpy.array(counts)
else:
self.data += numpy.array(counts)
def process_again(self, catalog, args=()):
# we dont actually need to do this if we are caching the data
time_horizon, n_cat, end_epoch, obs = args
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
# unfortunately, we need to iterate twice through the catalogs for this, unless we start pre-processing
# everything and storing approximate cell-wise rates
lhs = numpy.zeros(len(self.mws))
lhs_norm = numpy.zeros(len(self.mws))
for i, mw in enumerate(self.mws):
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
n_obs = obs_filt.event_count
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_cat = cat_filt.spatial_counts()
lh, lh_norm = _compute_likelihood(gridded_cat, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
lhs[i] = lh
lhs_norm[i] = lh_norm
self.test_distribution_likelihood.append(lhs)
def post_process(self, obs, args=None):
cata_iter, time_horizon, end_epoch, n_cat = args
results = {}
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
test_distribution_likelihood = numpy.array(self.test_distribution_likelihood)
# there can be nans in the spatial distribution
# prepare results for each mw
for i, mw in enumerate(self.mws):
# get observed likelihood
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
if obs_filt.event_count == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in observed observed_catalog.')
continue
n_obs = obs_filt.get_number_of_events()
gridded_obs = obs_filt.spatial_counts()
obs_lh, obs_lh_norm = _compute_likelihood(gridded_obs, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
# if obs_lh is -numpy.inf, recompute but only for indexes where obs and simulated are non-zero
message = "normal"
if obs_lh == -numpy.inf or obs_lh_norm == -numpy.inf:
idx_good_sim = apprx_rate_density[i,:] != 0
new_gridded_obs = gridded_obs[idx_good_sim]
new_n_obs = numpy.sum(new_gridded_obs)
print(f"Found -inf as the observed likelihood score for M{self.mws[i]}+. "
f"Assuming event(s) occurred in undersampled region of forecast.\n"
f"Recomputing with {new_n_obs} events after removing {n_obs - new_n_obs} events.")
if new_n_obs == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in observed observed_catalog '
f'after correcting for under-sampling in forecast.')
continue
new_ard = apprx_rate_density[i,idx_good_sim]
# we need to use the old n_obs here, because if we normalize the ard to a different value the observed
# statistic will not be computed correctly.
obs_lh, obs_lh_norm = _compute_likelihood(new_gridded_obs, new_ard, expected_cond_count[i], n_obs)
message = "undersampled"
# determine outcome of evaluation, check for infinity
_, quantile_likelihood = get_quantiles(test_distribution_likelihood[:,i], obs_lh)
# build evaluation result
result_likelihood = EvaluationResult(test_distribution=test_distribution_likelihood[:,i],
name='L-Test',
observed_statistic=obs_lh,
quantile=quantile_likelihood,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
results[mw] = result_likelihood
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
for mw, result in results.items():
# plot likelihood test
l_test_fname = self._build_filename(plot_dir, mw, 'l-test')
plot_args = {'percentile': 95,
'title': f'Pseudo-Likelihood Test, M{mw}+',
'filename': l_test_fname}
_ = plot_likelihood_test(result, axes=None, plot_args=plot_args, show=show)
# we can access this in the main program if needed
# self.ax.append((ax, spatial_ax))
self.fnames['s-test'].append(l_test_fname)
class CumulativeEventPlot(AbstractProcessingTask):
def __init__(self, origin_epoch, end_epoch, **kwargs):
super().__init__(**kwargs)
self.origin_epoch = origin_epoch
self.end_epoch = end_epoch
self.time_bins, self.dt = self._get_time_bins()
self.n_bins = self.time_bins.shape[0]
self.archive = False
def _get_time_bins(self):
diff = (self.end_epoch - self.origin_epoch) / SECONDS_PER_DAY / 1000
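        # epochs are in milliseconds, so diff is the forecast duration in days;
        # e.g. a 30-day forecast falls in the 'days' branch below: dt = 86,400,000 ms
        # and numpy.arange returns roughly 31 bin edges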
# if less than 7 day use hours
if diff <= 7.0:
dt = SECONDS_PER_HOUR * 1000
# if less than 180 day use days
elif diff <= 180:
dt = SECONDS_PER_DAY * 1000
# if less than 3 years (1,095.75 days) use weeks
elif diff <= 1095.75:
dt = SECONDS_PER_WEEK * 1000
# use 30 day
else:
dt = SECONDS_PER_DAY * 1000 * 30
# always make bins from start to end of observed_catalog
return numpy.arange(self.origin_epoch, self.end_epoch+dt/2, dt), dt
def process(self, catalog):
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
n_events = cat_filt.catalog.shape[0]
ses_origin_time = cat_filt.get_epoch_times()
inds = bin1d_vec(ses_origin_time, self.time_bins)
binned_counts = numpy.zeros(self.n_bins)
for j in range(n_events):
binned_counts[inds[j]] += 1
counts.append(binned_counts)
self.data.append(counts)
def post_process(self, obs, args=None):
# data are stored as (n_cat, n_mw_bins, n_time_bins)
summed_counts = numpy.cumsum(self.data, axis=2)
# compute summary statistics for plotting
fifth_per = numpy.percentile(summed_counts, 5, axis=0)
first_quar = numpy.percentile(summed_counts, 25, axis=0)
med_counts = numpy.percentile(summed_counts, 50, axis=0)
second_quar = numpy.percentile(summed_counts, 75, axis=0)
nine_fifth = numpy.percentile(summed_counts, 95, axis=0)
# compute median for comcat observed_catalog
obs_counts = []
for mw in self.mws:
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
obs_binned_counts = numpy.zeros(self.n_bins)
inds = bin1d_vec(obs_filt.get_epoch_times(), self.time_bins)
for j in range(obs_filt.event_count):
obs_binned_counts[inds[j]] += 1
obs_counts.append(obs_binned_counts)
obs_summed_counts = numpy.cumsum(obs_counts, axis=1)
# update time_bins for plotting
        millis_per_day = 60 * 60 * 1000 * 24
        time_bins = (self.time_bins - self.time_bins[0]) / millis_per_day
        # since we are cumulating, plot at bin ends
        time_bins = time_bins + (self.dt / millis_per_day)
# make all arrays start at zero
time_bins = numpy.insert(time_bins, 0, 0)
# 2d array with (n_mw, n_time_bins)
fifth_per = numpy.insert(fifth_per, 0, 0, axis=1)
first_quar = numpy.insert(first_quar, 0, 0, axis=1)
med_counts = numpy.insert(med_counts, 0, 0, axis=1)
second_quar = numpy.insert(second_quar, 0, 0, axis=1)
nine_fifth = numpy.insert(nine_fifth, 0, 0, axis=1)
obs_summed_counts = numpy.insert(obs_summed_counts, 0, 0, axis=1)
# ydata is now (5, n_mw, n_time_bins)
results = {'xdata': time_bins,
'ydata': (fifth_per, first_quar, med_counts, second_quar, nine_fifth),
'obs_data': obs_summed_counts}
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
# these are numpy arrays with mw information
xdata = results['xdata']
ydata = numpy.array(results['ydata'])
obs_data = results['obs_data']
# get values from plotting args
for i, mw in enumerate(self.mws):
cum_counts_fname = self._build_filename(plot_dir, mw, 'cum_counts')
plot_args = {'title': f'Cumulative Event Counts, M{mw}+',
'xlabel': 'Days since start of forecast',
'filename': cum_counts_fname}
ax = plot_cumulative_events_versus_time_dev(xdata, ydata[:,i,:], obs_data[i,:], plot_args, show=False)
# self.ax.append(ax)
self.fnames.append(cum_counts_fname)
def store_results(self, results, dir):
# store quickly for numpy, because we dont have a results class to deal with this
fname = self._build_filename(dir, self.mws[0], 'cum_counts') + '.npy'
numpy.save(fname, results)
class MagnitudeHistogram(AbstractProcessingTask):
def __init__(self, calc=True, **kwargs):
super().__init__(**kwargs)
self.calc = calc
self.archive = False
def process(self, catalog):
""" this can share data with the Magnitude test, hence self.calc
"""
if not self.name:
self.name = catalog.name
if self.calc:
# always compute this for the lowest magnitude, above this is redundant
cat_filt = catalog.filter(f'magnitude >= {self.mws[0]}')
binned_mags = cat_filt.magnitude_counts()
self.data.append(binned_mags)
def post_process(self, obs, args=None):
""" just store observation for later """
_ = args
self.obs = obs
def plot(self, results, plot_dir, plot_args=None, show=False):
mag_hist_fname = self._build_filename(plot_dir, self.mws[0], 'mag_hist')
plot_args = {
'xlim': [self.mws[0], numpy.max(CSEP_MW_BINS)],
'title': f"Magnitude Histogram, M{self.mws[0]}+",
'sim_label': self.name,
'obs_label': self.obs.name,
'filename': mag_hist_fname
}
obs_filt = self.obs.filter(f'magnitude >= {self.mws[0]}', in_place=False)
# data (n_sim, n_mag, n_mw_bins)
ax = plot_magnitude_histogram_dev(numpy.array(self.data)[:,0,:], obs_filt, plot_args, show=False)
# self.ax.append(ax)
self.fnames.append(mag_hist_fname)
class UniformLikelihoodCalculation(AbstractProcessingTask):
"""
This calculation assumes that the spatial distribution of the forecast is uniform, but the seismicity is located
in spatial bins according to the clustering provided by the forecast model.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.data = None
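        # NOTE: self.data (gridded counts per magnitude threshold) is used by
        # process_again()/post_process() but is not filled by process(); it is
        # apparently expected to be assigned externally, e.g. shared from another task.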
self.test_distribution_likelihood = []
self.test_distribution_spatial = []
self.fnames = {}
self.fnames['l-test'] = []
self.fnames['s-test'] = []
self.needs_two_passes = True
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
def process_again(self, catalog, args=()):
time_horizon, n_cat, end_epoch, obs = args
expected_cond_count = numpy.sum(self.data, axis=1) / n_cat
lhs = numpy.zeros(len(self.mws))
lhs_norm = numpy.zeros(len(self.mws))
for i, mw in enumerate(self.mws):
# generate with uniform rate in every spatial bin
apprx_rate_density = expected_cond_count[i] * numpy.ones(self.region.num_nodes) / self.region.num_nodes
# convert to rate density
apprx_rate_density = apprx_rate_density / self.region.dh / self.region.dh / time_horizon
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
n_obs = obs_filt.event_count
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_cat = cat_filt.spatial_counts()
lh, lh_norm = _compute_likelihood(gridded_cat, apprx_rate_density, expected_cond_count[i], n_obs)
lhs[i] = lh
lhs_norm[i] = lh_norm
self.test_distribution_likelihood.append(lhs)
self.test_distribution_spatial.append(lhs_norm)
def post_process(self, obs, args=None):
_, time_horizon, _, n_cat = args
results = {}
expected_cond_count = numpy.sum(self.data, axis=1) / n_cat
test_distribution_likelihood = numpy.array(self.test_distribution_likelihood)
test_distribution_spatial = numpy.array(self.test_distribution_spatial)
for i, mw in enumerate(self.mws):
# create uniform apprx rate density
apprx_rate_density = expected_cond_count[i] * numpy.ones(self.region.num_nodes) / self.region.num_nodes
# convert to rate density
apprx_rate_density = apprx_rate_density / self.region.dh / self.region.dh / time_horizon
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
n_obs = obs_filt.get_number_of_events()
gridded_obs = obs_filt.spatial_counts()
obs_lh, obs_lh_norm = _compute_likelihood(gridded_obs, apprx_rate_density, expected_cond_count[i],
n_obs)
# determine outcome of evaluation, check for infinity
_, quantile_likelihood = get_quantiles(test_distribution_likelihood[:, i], obs_lh)
_, quantile_spatial = get_quantiles(test_distribution_spatial[:, i], obs_lh_norm)
# Signals outcome of test
message = "normal"
            # Deal with the case where the cond. rate density function has zeros. Keep the
            # value but flag the result as either normal-and-wrong or undetermined (undersampled)
if numpy.isclose(quantile_likelihood, 0.0) or numpy.isclose(quantile_likelihood, 1.0):
# undetermined failure of the test
if numpy.isinf(obs_lh):
# Build message
message = "undetermined"
# build evaluation result
result_likelihood = EvaluationResult(test_distribution=test_distribution_likelihood[:, i],
name='UL-Test',
observed_statistic=obs_lh,
quantile=quantile_likelihood,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
# find out if there are issues with the test
if numpy.isclose(quantile_spatial, 0.0) or numpy.isclose(quantile_spatial, 1.0):
# undetermined failure of the test
if numpy.isinf(obs_lh_norm):
# Build message
message = "undetermined"
if n_obs == 0:
message = 'not-valid'
result_spatial = EvaluationResult(test_distribution=test_distribution_spatial[:, i],
name='US-Test',
observed_statistic=obs_lh_norm,
quantile=quantile_spatial,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
results[mw] = (result_likelihood, result_spatial)
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
for mw, result_tuple in results.items():
# plot likelihood test
l_test_fname = self._build_filename(plot_dir, mw, 'ul-test')
plot_args = {'percentile': 95,
'title': f'Pseudo-Likelihood Test\nMw > {mw}',
'bins': 'fd',
'filename': l_test_fname}
_ = plot_likelihood_test(result_tuple[0], axes=None, plot_args=plot_args, show=show)
# we can access this in the main program if needed
# self.ax.append((ax, spatial_ax))
self.fnames['l-test'].append(l_test_fname)
if result_tuple[1].status == 'not-valid':
                print(f'Skipping plot for spatial test on {mw}. Test results are not valid, '
                      f'likely because no earthquakes were observed in the target catalog.')
continue
# plot spatial test
s_test_fname = self._build_filename(plot_dir, mw, 'us-test')
plot_args = {'percentile': 95,
'title': f'Spatial Test\nMw > {mw}',
'bins': 'fd',
'filename': s_test_fname}
_ = plot_spatial_test(result_tuple[1], axes=None, plot_args=plot_args, show=False)
self.fnames['s-test'].append(s_test_fname)
class InterEventTimeDistribution(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mws = [2.5]
        # bin width is fixed here; ideally it should be chosen based on the length of the observed_catalog
self.data = AdaptiveHistogram(dh=0.1)
self.test_distribution = []
self.needs_two_passes = True
        # just saves some computation because we only need to compute this once
self.normed_data = numpy.array([])
self.version = 2
def process(self, catalog):
if self.name is None:
self.name = catalog.name
cat_ietd = catalog.get_inter_event_times()
self.data.add(cat_ietd)
def process_again(self, catalog, args=()):
cat_ietd = catalog.get_inter_event_times()
disc_ietd = numpy.zeros(len(self.data.bins))
idx = bin1d_vec(cat_ietd, self.data.bins)
numpy.add.at(disc_ietd, idx, 1)
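        # normalized cumulative histogram = empirical CDF of this catalog's inter-event times;
        # the statistic appended below compares it to the aggregate CDF (self.normed_data) via sup_dist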
disc_ietd_normed = numpy.cumsum(disc_ietd) / numpy.sum(disc_ietd)
if self.normed_data.size == 0:
self.normed_data = numpy.cumsum(self.data.data) / numpy.sum(self.data.data)
self.test_distribution.append(sup_dist(self.normed_data, disc_ietd_normed))
def post_process(self, obs, args=None):
# get inter-event times from observed_catalog
obs_filt = obs.filter(f'magnitude >= {self.mws[0]}', in_place=False)
obs_ietd = obs_filt.get_inter_event_times()
obs_disc_ietd = numpy.zeros(len(self.data.bins))
idx = bin1d_vec(obs_ietd, self.data.bins)
numpy.add.at(obs_disc_ietd, idx, 1)
        obs_disc_ietd_normed = numpy.cumsum(obs_disc_ietd) / numpy.sum(obs_disc_ietd)  # normalize with sum to match the simulated CDFs
d_obs = sup_dist(self.normed_data, obs_disc_ietd_normed)
_, quantile = get_quantiles(self.test_distribution, d_obs)
result = EvaluationResult(test_distribution=self.test_distribution,
name='IETD-Test',
observed_statistic=d_obs,
quantile=quantile,
status='Normal',
min_mw=self.mws[0],
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
return result
def plot(self, results, plot_dir, plot_args=None, show=False):
ietd_test_fname = self._build_filename(plot_dir, results.min_mw, 'ietd_test')
_ = plot_distribution_test(results, show=False, plot_args={'percentile': 95,
'title': f'Inter-event Time Distribution Test, M{results.min_mw}+',
'bins': 'auto',
'xlabel': "D* Statistic",
'ylabel': r"Number of catalogs",
'filename': ietd_test_fname})
self.fnames.append(ietd_test_fname)
class InterEventDistanceDistribution(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mws = [2.5]
        # start with a fixed 1 km bin for discretizing the distances,
        # but ideally the bin width should be chosen based on the length of the observed_catalog
self.data = AdaptiveHistogram(dh=1)
self.test_distribution = []
self.needs_two_passes = True
        # just saves some computation because we only need to compute this once
self.normed_data = numpy.array([])
self.version = 2
def process(self, catalog):
""" not nice on the memorys. """
if self.name is None:
self.name = catalog.name
# distances are in kilometers
cat_iedd = catalog.get_inter_event_distances()
self.data.add(cat_iedd)
def process_again(self, catalog, args=()):
cat_iedd = catalog.get_inter_event_distances()
disc_iedd = numpy.zeros(len(self.data.bins))
idx = bin1d_vec(cat_iedd, self.data.bins)
numpy.add.at(disc_iedd, idx, 1)
disc_iedd_normed = numpy.cumsum(disc_iedd) / numpy.sum(disc_iedd)
if self.normed_data.size == 0:
self.normed_data = numpy.cumsum(self.data.data) / numpy.sum(self.data.data)
self.test_distribution.append(sup_dist(self.normed_data, disc_iedd_normed))
def post_process(self, obs, args=None):
        # get inter-event distances from the observed catalog
obs_filt = obs.filter(f'magnitude >= {self.mws[0]}', in_place=False)
obs_iedd = obs_filt.get_inter_event_distances()
obs_disc_iedd = numpy.zeros(len(self.data.bins))
idx = bin1d_vec(obs_iedd, self.data.bins)
numpy.add.at(obs_disc_iedd, idx, 1)
        obs_disc_iedd_normed = numpy.cumsum(obs_disc_iedd) / numpy.sum(obs_disc_iedd)  # normalize with sum to match the simulated CDFs
d_obs = sup_dist(self.normed_data, obs_disc_iedd_normed)
_, quantile = get_quantiles(self.test_distribution, d_obs)
result = EvaluationResult(test_distribution=self.test_distribution,
name='IEDD-Test',
observed_statistic=d_obs,
quantile=quantile,
status='Normal',
min_mw=self.mws[0],
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
return result
def plot(self, results, plot_dir, plot_args=None, show=False):
iedd_test_fname = self._build_filename(plot_dir, results.min_mw, 'iedd_test')
_ = plot_distribution_test(results, show=False, plot_args={'percentile': 95,
'title': f'Inter-event Distance Distribution Test, M{results.min_mw}+',
'bins': 'auto',
'xlabel': "D* statistic",
'ylabel': r"Number of catalogs",
'filename': iedd_test_fname})
self.fnames.append(iedd_test_fname)
class TotalEventRateDistribution(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.needs_two_passes = True
self.data = AdaptiveHistogram(dh=1)
self.normed_data = numpy.array([])
self.test_distribution = []
self.version = 2
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
# compute stuff from observed_catalog
gridded_counts = catalog.spatial_counts()
self.data.add(gridded_counts)
def process_again(self, catalog, args=()):
# we dont actually need to do this if we are caching the data
_, n_cat, _, _ = args
cat_counts = catalog.spatial_counts()
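        # histogram the per-cell event counts for this catalog and compare their empirical CDF
        # against the aggregate CDF via sup_dist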
cat_disc = numpy.zeros(len(self.data.bins))
idx = bin1d_vec(cat_counts, self.data.bins)
numpy.add.at(cat_disc, idx, 1)
disc_terd_normed = numpy.cumsum(cat_disc) / numpy.sum(cat_disc)
if self.normed_data.size == 0:
self.normed_data = numpy.cumsum(self.data.data) / numpy.sum(self.data.data)
self.test_distribution.append(sup_dist(self.normed_data, disc_terd_normed))
def post_process(self, obs, args=None):
        # get gridded spatial counts from the observed catalog
obs_filt = obs.filter(f'magnitude >= {self.mws[0]}', in_place=False)
obs_terd = obs_filt.spatial_counts()
obs_disc_terd = numpy.zeros(len(self.data.bins))
idx = bin1d_vec(obs_terd, self.data.bins)
numpy.add.at(obs_disc_terd, idx, 1)
obs_disc_terd_normed = numpy.cumsum(obs_disc_terd) / numpy.sum(obs_disc_terd)
d_obs = sup_dist(self.normed_data, obs_disc_terd_normed)
_, quantile = get_quantiles(self.test_distribution, d_obs)
result = EvaluationResult(test_distribution=self.test_distribution,
name='TERD-Test',
observed_statistic=d_obs,
quantile=quantile,
status='Normal',
min_mw=self.mws[0],
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
return result
def plot(self, results, plot_dir, plot_args=None, show=False):
terd_test_fname = AbstractProcessingTask._build_filename(plot_dir, results.min_mw, 'terd_test')
_ = plot_distribution_test(results, show=False, plot_args={'percentile': 95,
'title': f'Total Event Rate Distribution-Test, M{results.min_mw}+',
'bins': 'auto',
'xlabel': "D* Statistic",
'ylabel': r"Number of catalogs",
'filename': terd_test_fname})
self.fnames.append(terd_test_fname)
class BValueTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.version = 2
def process(self, catalog):
if not self.name:
self.name = catalog.name
cat_filt = catalog.filter(f'magnitude >= {self.mws[0]}', in_place=False)
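        # append the Gutenberg-Richter b-value estimated from the filtered synthetic catalog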
self.data.append(cat_filt.get_bvalue(reterr=False))
def post_process(self, obs, args=None):
_ = args
data = numpy.array(self.data)
obs_filt = obs.filter(f'magnitude >= {self.mws[0]}', in_place=False)
obs_bval = obs_filt.get_bvalue(reterr=False)
# get delta_1 and delta_2 values
_, delta_2 = get_quantiles(data, obs_bval)
# prepare result
result = EvaluationResult(test_distribution=data,
name='BV-Test',
observed_statistic=obs_bval,
quantile=delta_2,
status='Normal',
min_mw=self.mws[0],
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
return result
def plot(self, results, plot_dir, plot_args=None, show=False):
bv_test_fname = self._build_filename(plot_dir, results.min_mw, 'bv_test')
_ = plot_number_test(results, show=False, plot_args={'percentile': 95,
'title': f"B-Value Distribution Test, M{results.min_mw}+",
'bins': 'auto',
'xlabel': 'b-value',
'xy': (0.2, 0.65),
'filename': bv_test_fname})
self.fnames.append(bv_test_fname)
class MedianMagnitudeTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def process(self, catalog):
if not self.name:
self.name = catalog.name
cat_filt = catalog.filter(f'magnitude >= {self.mws[0]}', in_place=False)
self.data.append(numpy.median(cat_filt.get_magnitudes()))
def post_process(self, obs, args=None):
_ = args
data = numpy.array(self.data)
obs_filt = obs.filter(f'magnitude >= {self.mws[0]}', in_place=False)
        obs_median_mw = float(numpy.median(obs_filt.get_magnitudes()))
        # get delta_1 and delta_2 values
        _, delta_2 = get_quantiles(data, obs_median_mw)
        # prepare result
        result = EvaluationResult(test_distribution=data,
                                  name='M-Test',
                                  observed_statistic=obs_median_mw,
quantile=delta_2,
min_mw=self.mws[0],
status='Normal',
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
return result
def plot(self, results, plot_dir, plot_args=None, show=False):
mm_test_fname = self._build_filename(plot_dir, self.mws[0], 'mm_test')
_ = plot_number_test(results, show=False, plot_args={'percentile': 95,
'title': f"Median Magnitude Distribution Test\nMw > {self.mws[0]}",
'bins': 25,
'filename': mm_test_fname})
self.fnames.append(mm_test_fname)
class SpatialProbabilityTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.region = None
self.test_distribution = []
self.needs_two_passes = True
self.buffer = []
self.fnames = []
self.version = 3
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
# compute stuff from data
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_counts = cat_filt.spatial_event_probability()
counts.append(gridded_counts)
# we want to aggregate the counts in each bin to preserve memory
if len(self.data) == 0:
self.data = numpy.array(counts)
else:
self.data += numpy.array(counts)
def process_again(self, catalog, args=()):
# we dont actually need to do this if we are caching the data
time_horizon, n_cat, end_epoch, obs = args
with numpy.errstate(divide='ignore'):
prob_map = numpy.log10(self.data / n_cat)
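        # prob_map: log10 of the fraction of catalogs that produced at least one event in each cell
        # (assuming spatial_event_probability returns per-cell occupancy); empty cells map to -inf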
# unfortunately, we need to iterate twice through the catalogs for this.
probs = numpy.zeros(len(self.mws))
for i, mw in enumerate(self.mws):
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_cat = cat_filt.spatial_event_probability()
prob = _compute_spatial_statistic(gridded_cat, prob_map[i, :])
probs[i] = prob
self.test_distribution.append(probs)
def post_process(self, obs, args=None):
cata_iter, time_horizon, end_epoch, n_cat = args
results = {}
with numpy.errstate(divide='ignore'):
prob_map = numpy.log10(self.data / n_cat)
test_distribution_prob = numpy.array(self.test_distribution)
# prepare results for each mw
for i, mw in enumerate(self.mws):
# get observed likelihood
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
if obs_filt.event_count == 0:
                print(f'Skipping Probability test for Mw {mw} because there are no events in the observed catalog.')
continue
gridded_obs = obs_filt.spatial_event_probability()
obs_prob = _compute_spatial_statistic(gridded_obs, prob_map[i, :])
# determine outcome of evaluation, check for infinity will go here...
test_1d = test_distribution_prob[:,i]
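            # a synthetic catalog can yield an undefined statistic (e.g., no events above this
            # threshold); drop NaNs before computing the quantile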
if numpy.isnan(numpy.sum(test_1d)):
test_1d = test_1d[~numpy.isnan(test_1d)]
_, quantile_likelihood = get_quantiles(test_1d, obs_prob)
# Signals outcome of test
message = "normal"
            # Deal with the case where the conditional rate density has zeros: keep the value but
            # flag the result as either normal-but-wrong or undetermined (undersampled)
if numpy.isclose(quantile_likelihood, 0.0) or numpy.isclose(quantile_likelihood, 1.0):
# undetermined failure of the test
if numpy.isinf(obs_prob):
# Build message, should maybe try sampling procedure from pseudo-likelihood based tests
message = "undetermined"
# build evaluation result
result_prob = EvaluationResult(test_distribution=test_1d,
name='Prob-Test',
observed_statistic=obs_prob,
quantile=quantile_likelihood,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
results[mw] = result_prob
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
for mw, result in results.items():
# plot likelihood test
prob_test_fname = self._build_filename(plot_dir, mw, 'prob-test')
plot_args = {'percentile': 95,
'title': f'Probability Test, M{mw}+',
'bins': 'auto',
'xlabel': 'Spatial probability statistic',
'ylabel': 'Number of catalogs',
'filename': prob_test_fname}
_ = plot_probability_test(result, axes=None, plot_args=plot_args, show=show)
self.fnames.append(prob_test_fname)
class SpatialProbabilityPlot(AbstractProcessingTask):
def __init__(self, calc=True, **kwargs):
super().__init__(**kwargs)
self.calc=calc
self.region=None
self.archive=False
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
if self.calc:
# compute stuff from data
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_counts = cat_filt.spatial_event_probability()
counts.append(gridded_counts)
# we want to aggregate the counts in each bin to preserve memory
if len(self.data) == 0:
self.data = numpy.array(counts)
else:
self.data += numpy.array(counts)
def post_process(self, obs, args=None):
""" store things for later """
self.obs = obs
_, time_horizon, _, n_cat = args
self.time_horizon = time_horizon
self.n_cat = n_cat
return None
def plot(self, results, plot_dir, plot_args=None, show=False):
with numpy.errstate(divide='ignore'):
prob = numpy.log10(numpy.array(self.data) / self.n_cat)
for i, mw in enumerate(self.mws):
# compute expected rate density
obs_filt = self.obs.filter(f'magnitude >= {mw}', in_place=False)
plot_data = self.region.get_cartesian(prob[i,:])
ax = plot_spatial_dataset(plot_data,
self.region,
plot_args={'clabel': r'Log$_{10}$ Probability 1 or more events'
'\n'
f'within {self.region.dh}°x{self.region.dh}° cells',
'clim': [-5, 0],
'title': f'Spatial Probability Plot, M{mw}+'})
ax.scatter(obs_filt.get_longitudes(), obs_filt.get_latitudes(), marker='.', color='white', s=40, edgecolors='black')
crd_fname = self._build_filename(plot_dir, mw, 'prob_obs')
ax.figure.savefig(crd_fname + '.png')
ax.figure.savefig(crd_fname + '.pdf')
self.fnames.append(crd_fname)
class ApproximateRatePlot(AbstractProcessingTask):
def __init__(self, calc=True, **kwargs):
super().__init__(**kwargs)
self.calc=calc
self.region=None
self.archive = False
self.version = 2
def process(self, data):
# grab stuff from data that we might need later
if not self.region:
self.region = data.region
if not self.name:
self.name = data.name
if self.calc:
# compute stuff from data
counts = []
for mw in self.mws:
cat_filt = data.filter(f'magnitude >= {mw}')
gridded_counts = cat_filt.spatial_counts()
counts.append(gridded_counts)
# we want to aggregate the counts in each bin to preserve memory
if len(self.data) == 0:
self.data = numpy.array(counts)
else:
self.data += numpy.array(counts)
def post_process(self, obs, args=None):
""" store things for later """
self.obs = obs
_, time_horizon, _, n_cat = args
self.time_horizon = time_horizon
self.n_cat = n_cat
return None
def plot(self, results, plot_dir, plot_args=None, show=False):
with numpy.errstate(divide='ignore'):
crd = numpy.log10(numpy.array(self.data) / self.n_cat)
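        # crd: log10 of the mean number of events per cell per catalog; cells with zero counts
        # map to -inf (the divide warning is suppressed above)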
for i, mw in enumerate(self.mws):
# compute expected rate density
obs_filt = self.obs.filter(f'magnitude >= {mw}', in_place=False)
plot_data = self.region.get_cartesian(crd[i,:])
ax = plot_spatial_dataset(plot_data,
self.region,
plot_args={'clabel': r'Log$_{10}$ Approximate rate density'
'\n'
f'(Expected events per week per {self.region.dh}°x{self.region.dh}°)',
'clim': [-5, 0],
'title': f'Approximate Rate Density with Observations, M{mw}+'})
ax.scatter(obs_filt.get_longitudes(), obs_filt.get_latitudes(), marker='.', color='white', s=40, edgecolors='black')
crd_fname = self._build_filename(plot_dir, mw, 'crd_obs')
ax.figure.savefig(crd_fname + '.png')
ax.figure.savefig(crd_fname + '.pdf')
# self.ax.append(ax)
self.fnames.append(crd_fname)
class ApproximateRateDensity(AbstractProcessingTask):
def __init__(self, calc=True, **kwargs):
super().__init__(**kwargs)
self.calc = calc
self.region = None
self.archive = False
self.mag_dh = None
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
if not self.mag_dh:
mag_dh = self.region.magnitudes[1] - self.region.magnitudes[0]
self.mag_dh = mag_dh
if self.calc:
# compute stuff from data
gridded_counts = catalog.spatial_magnitude_counts()
# we want to aggregate the counts in each bin to preserve memory
if self.n_cat is not None:
if len(self.data) == 0:
self.data = numpy.array(gridded_counts) / self.n_cat
else:
self.data += numpy.array(gridded_counts) / self.n_cat
else:
if len(self.data) == 0:
self.data = numpy.array(gridded_counts)
else:
self.data += numpy.array(gridded_counts)
def post_process(self, obs, args=()):
""" store things for later, and call if n_cat was not availabe at run-time for some reason. """
self.obs = obs
_, time_horizon, _, n_cat = args
self.time_horizon = time_horizon
self.n_cat = n_cat
with numpy.errstate(divide='ignore'):
self.crd = numpy.array(self.data) / self.n_cat
return None
def plot(self, results, plot_dir, plot_args=None, show=False):
# compute expected rate density
with numpy.errstate(divide='ignore'):
plot_data = numpy.log10(self.region.get_cartesian(self.crd))
ax = plot_spatial_dataset(plot_data,
self.region,
plot_args={'clabel': r'Log$_{10}$ Approximate Rate Density'
'\n'
f'(Expected Events per year per {self.region.dh}°x{self.region.dh}°) per {self.mag_dh} Mw',
'clim': [0, 5],
'title': f'Approximate Rate Density with Observations, M{self.min_mw}+'})
ax.scatter(self.obs.get_longitudes(), self.obs.get_latitudes(), marker='.', color='white', s=40, edgecolors='black')
crd_fname = self._build_filename(plot_dir, self.min_mw, 'crd_obs')
ax.figure.savefig(crd_fname + '.png')
ax.figure.savefig(crd_fname + '.pdf')
# self.ax.append(ax)
self.fnames.append(crd_fname)
class ApproximateSpatialRateDensity(AbstractProcessingTask):
def __init__(self, calc=True, **kwargs):
super().__init__(**kwargs)
self.calc = calc
self.region = None
self.archive = False
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
if self.calc:
# compute stuff from data
gridded_counts = catalog.spatial_counts()
# we want to aggregate the counts in each bin to preserve memory
if len(self.data) == 0:
self.data = numpy.array(gridded_counts)
else:
self.data += numpy.array(gridded_counts)
def post_process(self, obs, args=()):
""" store things for later """
self.obs = obs
_, time_horizon, _, n_cat = args
self.time_horizon = time_horizon
self.n_cat = n_cat
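        # convert aggregated counts to an approximate rate density:
        # events per square degree per unit time, averaged over the n_cat catalogs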
self.crd = numpy.array(self.data) / self.region.dh / self.region.dh / self.time_horizon / self.n_cat
return None
def plot(self, results, plot_dir, plot_args=None, show=False):
# compute expected rate density
with numpy.errstate(divide='ignore'):
plot_data = numpy.log10(self.region.get_cartesian(self.crd))
ax = plot_spatial_dataset(plot_data,
self.region,
plot_args={'clabel': r'Log$_{10}$ Approximate Rate Density'
'\n'
f'(Expected Events per year per {self.region.dh}°x{self.region.dh}°)',
'clim': [0, 5],
'title': f'Approximate Rate Density with Observations, M{self.min_mw}+'})
ax.scatter(self.obs.get_longitudes(), self.obs.get_latitudes(), marker='.', color='white', s=40, edgecolors='black')
crd_fname = self._build_filename(plot_dir, self.min_mw, 'crd_obs')
ax.figure.savefig(crd_fname + '.png')
ax.figure.savefig(crd_fname + '.pdf')
# self.ax.append(ax)
self.fnames.append(crd_fname)
class ConditionalApproximateRatePlot(AbstractProcessingTask):
def __init__(self, obs, **kwargs):
super().__init__(**kwargs)
self.obs = obs
self.data = defaultdict(list)
self.archive = False
self.version = 2
    def process(self, data):
        """ Collects all catalogs whose event count matches n_obs (within tolerance) in a dict. """
        if self.name is None:
            self.name = data.name
        if self.region is None:
            self.region = data.region
for mw in self.mws:
cat_filt = data.filter(f'magnitude >= {mw}')
obs_filt = self.obs.filter(f'magnitude >= {mw}', in_place=False)
n_obs = obs_filt.event_count
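            # keep only catalogs whose event count lies within +/-5% of the observed count,
            # i.e. condition the rate estimate on the observed number of events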
tolerance = 0.05 * n_obs
if cat_filt.event_count <= n_obs + tolerance \
and cat_filt.event_count >= n_obs - tolerance:
self.data[mw].append(cat_filt.spatial_counts())
def post_process(self, obs, args=None):
_, time_horizon, _, n_cat = args
self.time_horizon = time_horizon
self.n_cat = n_cat
return
def plot(self, results, plot_dir, plot_args=None, show=False):
# compute conditional approximate rate density
for i, mw in enumerate(self.mws):
# compute expected rate density
obs_filt = self.obs.filter(f'magnitude >= {mw}', in_place=False)
if obs_filt.event_count == 0:
continue
rates = numpy.array(self.data[mw])
if rates.shape[0] == 0:
continue
# compute conditional approximate rate
mean_rates = numpy.mean(rates, axis=0)
with numpy.errstate(divide='ignore'):
crd = numpy.log10(mean_rates)
plot_data = self.region.get_cartesian(crd)
ax = plot_spatial_dataset(plot_data,
self.region,
plot_args={'clabel': r'Log$_{10}$ Conditional Rate Density'
'\n'
f'(Expected Events per year per {self.region.dh}°x{self.region.dh}°)',
'clim': [-5, 0],
'title': f'Conditional Approximate Rate Density with Observations, M{mw}+'})
ax.scatter(obs_filt.get_longitudes(), obs_filt.get_latitudes(), marker='.', color='white', s=40,
edgecolors='black')
crd_fname = self._build_filename(plot_dir, mw, 'cond_rates')
ax.figure.savefig(crd_fname + '.png')
ax.figure.savefig(crd_fname + '.pdf')
# self.ax.append(ax)
self.fnames.append(crd_fname)
class CatalogMeanStabilityAnalysis(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.calc = False
self.mws = [2.5, 3.5, 4.5, 5.5, 6.5, 7.5]
def process(self, catalog):
if not self.name:
self.name = catalog.name
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
counts.append(cat_filt.event_count)
self.data.append(counts)
def post_process(self, obs, args=None):
results = {}
data = numpy.array(self.data)
n_sim = data.shape[0]
        end_points = numpy.arange(1, n_sim, 100)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
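# Illustrative usage sketch (kept as a comment so this generated module stays import-clean);
# the reflection (1, 2, 3) below is an arbitrary example, not part of the generated data:
#
#     sg = space_groups['P -1']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # P -1 contains only +/- identity with zero translations, so hkls holds
#     # (1, 2, 3) and (-1, -2, -3) and both phase factors are 1.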
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
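# NOTE: in the centred settings of this table (C-, A-, I- and F-lattice groups
# such as 'C m m 2', 'A b a 2', 'I m m 2' and 'F m m 2' above), the primitive
# operations are repeated once per centring vector, and that vector is added to
# trans_num/trans_den without reduction modulo 1 -- e.g. 'A b a 2' carries the
# translation (1/2, 1, 1/2) rather than (1/2, 0, 1/2). Code consuming these
# tables that needs canonical translations should reduce them into [0, 1).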
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
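# Illustrative sketch (an editorial addition, not emitted by the generator):
# each (rot, trans_num, trans_den) tuple in this table encodes the affine
# symmetry operation  x' = rot @ x + trans_num / trans_den  acting on
# fractional coordinates. Assuming a NumPy-compatible array module, the orbit
# of a point under one group's operations could be expanded roughly as below
# (the helper name `expand_positions` is chosen only for illustration):
#
#     import numpy as np
#
#     def expand_positions(transformations, point):
#         """Images of a fractional coordinate under each listed operation,
#         reduced into the unit cell."""
#         point = np.asarray(point, dtype=float)
#         return [(np.dot(rot, point) + num / den) % 1.0
#                 for rot, num, den in transformations]
#
#     # For the 'P m m m' block above this yields the eight general positions
#     # (+/-x, +/-y, +/-z) of space group 47, e.g.:
#     #     expand_positions(space_groups['P m m m'].transformations,
#     #                      [0.1, 0.2, 0.3])
#     # assuming the SpaceGroup class defined earlier stores its operation list
#     # in a `transformations` attribute; otherwise pass the local
#     # `transformations` list directly.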
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
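# Space group 69: 'F m m m'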
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
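# Space group 70: 'F d d d :2'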
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
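# Space group 71: 'I m m m'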
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
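# Space group 72: 'I b a m'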
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
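# Space group 73: 'I b c a'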
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
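# Space group 74: 'I m m a'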
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
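# Space group 75: 'P 4'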
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
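# Space group 76: 'P 41'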
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
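# Space group 77: 'P 42'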
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
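# Space group 78: 'P 43'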
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
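# Space group 79: 'I 4'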
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
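# Space group 80: 'I 41'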
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
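# Space group 81: 'P -4'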
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
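# Space group 82: 'I -4'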
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
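# Space group 83: 'P 4/m'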
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
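# Space group 84: 'P 42/m'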
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
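# Space group 85: 'P 4/n :2'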
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
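# Space group 86: 'P 42/n :2'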
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
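# Space group 87: 'I 4/m'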
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
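# Space group 88: 'I 41/a :2'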
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
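# Space group 89: 'P 4 2 2'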
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
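# Space group 90: 'P 4 21 2'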
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
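# Space group 91: 'P 41 2 2'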
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
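# Space group 92: 'P 41 21 2'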
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
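# Space group 93: 'P 42 2 2'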
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
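# Space group 94: 'P 42 21 2'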
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
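# Space group 95: 'P 43 2 2'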
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
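# Space group 96: 'P 43 21 2'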
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
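# Space group 97: 'I 4 2 2'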
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
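# Body-centred (I) groups such as I 4 2 2 list each operation twice: once as
# given and once with the (1/2, 1/2, 1/2) centring translation added, which
# is why the second block of eight above repeats the same rotations with
# trans_num/trans_den = [1,1,1]/[2,2,2].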
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
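# Groups 99-106 (point group 4mm) each carry eight operations: the four
# rotations about the c axis followed by four mirror or glide reflections
# whose glide components appear in trans_num/trans_den.  The I-centred
# members 107-110 list the same eight plus their centring-shifted copies.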
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
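# From group 111 through 122 (point groups -42m and -4m2) the operations with
# rot = [0,1,0,-1,0,0,0,0,-1] and [0,-1,0,1,0,0,0,0,-1] are the -4
# roto-inversions about the c axis; the remaining operations are two-fold
# rotations or mirror/glide reflections.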
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
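# The centrosymmetric class 4/mmm starts here: groups 123 onwards list
# sixteen operations, the eight proper rotations of 422 (with any screw or
# glide translations) followed by the eight improper operations obtained by
# combining them with the inversion.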
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
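# The ':2' suffix in the keys above (and in the analogous P 4/n..., P 42/n...
# and I 41/a... groups below) marks origin choice 2 of the International
# Tables, i.e. the origin placed at a centre of inversion.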
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
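# Nos. 139-142 below are the body-centred (I) tetragonal groups: every
# operation of the primitive set is listed a second time with the centring
# translation (1/2, 1/2, 1/2) added to its translation part.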
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
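# Space group 143: P 3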
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
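# Space group 144: P 31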
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
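# Space group 145: P 32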
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
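# Space group 146: R 3 :H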
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
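# Space group 147: P -3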
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
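# Space group 148: R -3 :H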
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
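# Space group 149: P 3 1 2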
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
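# Space group 150: P 3 2 1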
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
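# Space group 151: P 31 1 2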
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
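# Space group 152: P 31 2 1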
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
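# Space group 153: P 32 1 2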
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
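# Space group 154: P 32 2 1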
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
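# Space group 155: R 3 2 :H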
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
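# Space group 156: P 3 m 1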
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
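# Space group 157: P 3 1 m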
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
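# Space group 158: P 3 c 1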
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
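# Space group 159: P 3 1 c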
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
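# Space group 160: R 3 m :H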
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
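# Space group 161: R 3 c :H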
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
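# Space group 162: P -3 1 m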
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
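# Space group 163: P -3 1 c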
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
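# Space group 164: P -3 m 1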
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
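# Space group 165: P -3 c 1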
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
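# Space group 166: R -3 m :H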
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
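# Space group 167: R -3 c :H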
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
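# Space group 168: P 6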
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
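# Space group 169: P 61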
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
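# Space group 170: P 65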
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
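# Space group 171: P 62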
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
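# Space group 172: P 64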
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
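# Space group 173: P 63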
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
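# Space group 174 (P -6)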
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
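# Space group 175 (P 6/m)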
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
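# Space group 176 (P 63/m)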
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
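# Space group 177 (P 6 2 2)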
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
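# Space group 178 (P 61 2 2)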
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
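# Space group 179 (P 65 2 2)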
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
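# Space group 180 (P 62 2 2)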
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
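# Space group 181 (P 64 2 2)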
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
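# Space group 182 (P 63 2 2)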
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
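# Space group 183 (P 6 m m)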
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
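# Space group 184 (P 6 c c)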
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
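# Space group 185 (P 63 c m)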
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
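# Space group 186 (P 63 m c)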
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
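# Space group 187 (P -6 m 2)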
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
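# Space group 188 (P -6 c 2)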
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
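# Space group 189 (P -6 2 m)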
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
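# Space group 190 (P -6 2 c)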
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
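# Space group 191 (P 6/m m m)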
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
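# Space group 192 (P 6/m c c)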
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
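# Space group 193 (P 63/m c m)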
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
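# Space group 194 (P 63/m m c)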
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
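# Space group 195 (P 2 3): first of the cubic space groups.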
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
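# The following block combines the same twelve rotations with the lattice
# centring translations (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).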
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
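# Space group 197 (I 2 3) follows.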
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
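# Space group 198 (P 21 3) follows.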
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
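# Space group 199 (I 21 3) follows.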
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
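# Space group 200 (P m -3) follows.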
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
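# Space group 201 (P n -3 :2) follows.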
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
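# Space group 202 (F m -3) follows.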
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
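# Space group 203 (F d -3 :2) follows.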
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
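# Usage sketch (illustrative only; kept as comments so the generated module is
# not altered). It assumes a numpy-compatible `N` and that the SpaceGroup
# objects built above expose their operations as a `transformations` attribute,
# which is an assumption about the class defined earlier in this module:
#
#     pos = N.array([0.1, 0.2, 0.3])                      # fractional coordinates
#     images = []
#     for rot, trans_num, trans_den in space_groups['P 4 3 2'].transformations:
#         # rotate, add the fractional translation, wrap back into the unit cell
#         images.append((N.dot(rot, pos) + 1.0 * trans_num / trans_den) % 1.0)
#
# The multiplication by 1.0 avoids integer division of trans_num by trans_den.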
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
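# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated data): each entry of a
# `transformations` list is a tuple (rot, trans_num, trans_den), i.e. a 3x3
# integer rotation matrix and a fractional translation trans_num/trans_den.
# Assuming each operation acts on a fractional coordinate x as
# N.dot(rot, x) + trans_num/trans_den (the usual crystallographic convention),
# the hypothetical helper below expands one position into its
# symmetry-equivalent positions inside the unit cell.
def _apply_space_group_ops(transformations, x):
    positions = []
    for rot, trans_num, trans_den in transformations:
        y = N.dot(rot, x) + 1.0*trans_num/trans_den
        positions.append(y % 1.0)   # wrap back into [0, 1)
    return positions
# ---------------------------------------------------------------------------

# Space group 211, 'I 4 3 2'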
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
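# Space group 212, 'P 43 3 2'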
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
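# Space group 213, 'P 41 3 2'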
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
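# Space group 214, 'I 41 3 2'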
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(214, 'I 41 3 2', transformations)
space_groups[214] = sg
space_groups['I 41 3 2'] = sg
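# Space group 215, 'P -4 3 m'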
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(215, 'P -4 3 m', transformations)
space_groups[215] = sg
space_groups['P -4 3 m'] = sg
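# Space group 216, 'F -4 3 m'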
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(216, 'F -4 3 m', transformations)
space_groups[216] = sg
space_groups['F -4 3 m'] = sg
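# Space group 217, 'I -4 3 m'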
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
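# Space group 218, 'P -4 3 n'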
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(218, 'P -4 3 n', transformations)
space_groups[218] = sg
space_groups['P -4 3 n'] = sg
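# Space group 219: F -4 3 c.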
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(219, 'F -4 3 c', transformations)
space_groups[219] = sg
space_groups['F -4 3 c'] = sg
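# Space group 220: I -4 3 d.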
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(220, 'I -4 3 d', transformations)
space_groups[220] = sg
space_groups['I -4 3 d'] = sg
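# Space group 221: P m -3 m.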
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(221, 'P m -3 m', transformations)
space_groups[221] = sg
space_groups['P m -3 m'] = sg
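# Space group 222: P n -3 n :2.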
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(222, 'P n -3 n :2', transformations)
space_groups[222] = sg
space_groups['P n -3 n :2'] = sg
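# Space group 223: P m -3 n.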
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(223, 'P m -3 n', transformations)
space_groups[223] = sg
space_groups['P m -3 n'] = sg
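# Space group 224: P n -3 m :2.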
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(224, 'P n -3 m :2', transformations)
space_groups[224] = sg
space_groups['P n -3 m :2'] = sg
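# Illustrative sketch (not part of the generated table): how one transformation
# tuple (rot, trans_num, trans_den) stored above can be applied to a fractional
# coordinate. The helper name _apply_symop is hypothetical; it only assumes that
# N provides numpy-style array() and dot(), as used throughout this module.
def _apply_symop(rot, trans_num, trans_den, frac_xyz):
    # rotate the fractional position, then add the translation trans_num/trans_den
    shift = N.array([float(n) / d for n, d in zip(trans_num, trans_den)])
    return N.dot(rot, N.array(frac_xyz)) + shift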
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(225, 'F m -3 m', transformations)
space_groups[225] = sg
space_groups['F m -3 m'] = sg
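# Minimal usage sketch (an illustration, not part of the generated table): every
# space group registered above is reachable from the space_groups dict either by
# its International Tables number or by its Hermann-Mauguin symbol, and both keys
# refer to the same SpaceGroup instance, e.g.
#   sg_fcc = space_groups[225]
#   assert sg_fcc is space_groups['F m -3 m']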
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(226, 'F m -3 c', transformations)
space_groups[226] = sg
space_groups['F m -3 c'] = sg
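# --- Illustrative helper (an added sketch, not part of the generated table) ---
# Each entry appended to a `transformations` list is a tuple
# (rot, trans_num, trans_den): an integer 3x3 rotation matrix plus a fractional
# translation trans_num / trans_den. The helper below is a hypothetical,
# documentation-only sketch of how one such operation could be applied to a
# fractional coordinate; it assumes numpy is available and is not used by the
# generated code itself.
def _apply_symmetry_op_example(frac_xyz, rot, trans_num, trans_den):
    """Map a fractional coordinate with one (rot, trans_num, trans_den) entry."""
    import numpy as np  # local import so the generated module's imports stay untouched
    shift = np.asarray(trans_num, dtype=float) / np.asarray(trans_den, dtype=float)
    new_xyz = np.dot(np.asarray(rot, dtype=float), np.asarray(frac_xyz, dtype=float)) + shift
    return new_xyz % 1.0  # wrap the image back into the unit cell
# -------------------------------------------------------------------------------

# The next transformations block defines space group 227, 'F d -3 m :2'
# (registered at the end of the block below).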
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(227, 'F d -3 m :2', transformations)
space_groups[227] = sg
space_groups['F d -3 m :2'] = sg
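# --- Illustrative usage sketch (an addition, not generated data) ---------------
# As the assignments above show, every registered group is reachable both by its
# IT number and by its Hermann-Mauguin symbol, and both keys map to the same
# SpaceGroup object. The helper name below is hypothetical and only documents
# that lookup pattern.
def _lookup_space_group_example(key):
    """Fetch a registered SpaceGroup by IT number or Hermann-Mauguin symbol."""
    return space_groups[key]  # e.g. space_groups[227] or space_groups['F d -3 m :2']
# --------------------------------------------------------------------------------

# The following transformations list belongs to the next space group in the
# table; its registration follows the same pattern later in the file.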
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,5,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,5,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,5])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,3,5])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,5,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,5])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,5])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,0,5])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,5])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(228, 'F d -3 c :2', transformations)
space_groups[228] = sg
space_groups['F d -3 c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
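rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
# NOTE: the listing is truncated at this point; the remaining symmetry operations
# of this group continue in the same rot / trans_num / trans_den pattern (the zero
# translation added above matches every preceding operation of this group, but is
# an assumption). Since groups 227 and 228 appear above, the block would presumably
# close with a registration analogous to the one used for group 228, e.g.:
# sg = SpaceGroup(229, 'I m -3 m', transformations)
# space_groups[229] = sg
# space_groups['I m -3 m'] = sg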
import pandas as pd
import numpy as np
import math
def readFile(filename='iris.csv'):
"""read csv file and convert it to array"""
df = pd.read_csv(filename, header=0) # read the file
return df
def getDistance(x_data_row1, x_data_row2):
"""calculates distance between two rows of data (based on Euclidean distance)"""
distance = 0
length = len(x_data_row1)
for i in range(length):
distance += (x_data_row1[i] - x_data_row2[i])**2 # differences of the columns squared
distance = math.sqrt(distance)
return distance
def knnForOne(x_training_data, y_training_data, single_x_test_data, n_neighbors):
"""find the most common neighbor out of k nearest neighbors for 1 row of test data"""
distances_list = []
nearest_neighbors = []
length = len(x_training_data)
for i in range(length):
X2 = x_training_data[i,:] # get current row of known data
Y2 = y_training_data[i] # get current label of known data
distance = getDistance(single_x_test_data, X2) # compare test to known data
distances_list += [[distance, Y2]]
distances_list = sorted(distances_list)
for i in range(n_neighbors):
nearest_neighbors += [distances_list[i][1]]
return max(nearest_neighbors, key=nearest_neighbors.count)
def knnForAll(x_training_data, y_training_data, x_test_data, n_neighbors):
"""find the most common neighbor out of k nearest neighbors for multiple rows of test data"""
y_test_data = []
for row in x_test_data: # for multiple rows of test data
y_test_data += [knnForOne(x_training_data, y_training_data, row, n_neighbors)]
return y_test_data
def crossValidate(x_training_data, y_training_data, test_size_percentage):
"""find the value of k that produces the best results for the data"""
data_length = len(x_training_data)
foldSize = int(round(data_length * test_size_percentage)) # size of each temporary test data
best_score = 0
best_k = 0
for k in [1,3,5,7]: # Test different values of k
score = 0
for i in range(0, data_length, foldSize): # Switch section of test data
x_temp_test = x_training_data[i:i+foldSize] # get temporary data to test
known_y_test = y_training_data[i:i+foldSize] # we already know their labels
x_temp_training = np.append(x_training_data[0:i], x_training_data[i+foldSize:], axis=0) # the rest is our temporary training data
            y_temp_training = np.append(y_training_data[0:i], y_training_data[i+foldSize:], axis=0) # and their corresponding labels
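            # --- Hedged completion sketch: the original crossValidate body is cut off
            # --- above. A minimal continuation, reusing knnForAll from this file, could
            # --- score each fold and keep the best k ('predicted' is a name introduced here):
            predicted = knnForAll(x_temp_training, y_temp_training, x_temp_test, k)
            for pred_label, true_label in zip(predicted, known_y_test):
                if pred_label == true_label:
                    score += 1  # count correct predictions in this fold
        if score > best_score:  # after all folds, keep the k with the highest score
            best_score = score
            best_k = k
    return best_k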
import pandas as pd
import numpy as np
from os.path import join, exists, split
from os import mkdir, makedirs, listdir
import gc
import matplotlib.pyplot as plt
import seaborn
from time import time
import pickle
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('split_name')
parser.add_argument('f')
args = parser.parse_args()
split_name = args.split_name
f = args.f
# split_name = 'temporal_5'
# f = 'batch_42.h5'
metaid_oor_mapping = dict(vm136=[2.2, float('inf')],
vm146=[2.2, float('inf')],
vm5=[150, 65],
vm1=[110, 60],
pm41='Dobutamine',
pm42='Milrinone',
vm13=[-float('inf'), 4],
vm28=[2,-2],
vm172=[1.2, float('inf')],
vm174=[7.8, 4],
vm176=[10, float('inf')],
vm4=[140, 40],
vm62=[30, float('inf')],
vm3=[200, 90],
vm20=[-float('inf'), 90])
metaid_name_mapping = dict(vm136='a-Lactate',
vm146='v-Lactate',
vm5='ABP mean (invasive)',
vm1='Heart rate',
pm41='Dobutamine',
pm42='Milrinone',
vm13='Cardiac output',
vm28='RASS',
vm172='INR',
vm174='Blood glucose',
vm176='C-reactive protein',
vm4='ABP diastolic (invasive)',
vm62='Peak inspiratory pressure (ventilator)',
vm3='ABP systolic (invasive)',
vm20='SpO2')
del metaid_oor_mapping['pm41'], metaid_oor_mapping['pm42']
del metaid_name_mapping['pm41'], metaid_name_mapping['pm42']
delta_t = 0
window_size = 480
data_version = 'v6b'
result_version = '181108'
t_postevent = np.timedelta64(2,'h')
wsize_upper_h = (window_size+delta_t) * np.timedelta64(1,'m')
wsize_lower_h = delta_t * np.timedelta64(1,'m')
bern_path = '/cluster/work/grlab/clinical/Inselspital/DataReleases/01-19-2017/InselSpital/'
data_path = join(bern_path,'3_merged', data_version,'reduced')
ep_path = join(bern_path,'3a_endpoints', data_version,'reduced')
res_dir = lambda s: 'WorseStateFromZero_0.0_8.0_%s_lightgbm_full'%s
pred_path = join(bern_path,'8_predictions', result_version,'reduced',
split_name, res_dir('shap_top20_variables_MIMIC_BERN'))
out_path = join(bern_path,'circews_analysis','simple_alarm', split_name)
if not exists(out_path):
mkdir(out_path)
with pd.HDFStore(join(pred_path, f), mode='r') as tmp:
pids = [int(key[2:]) for key in tmp.keys()]
gc.collect()
lst_vmid = [key for key in metaid_name_mapping.keys()]
lst_period_type = ['critical_window', 'maintenance_window', 'uncritical_window', 'patients_wo_events']
stats = dict()
for vmid in lst_vmid + ['any']:
tmp = dict()
for period_type in lst_period_type:
tmp.update({period_type: dict(valid_los=[], cnt_alarm=[], los=[])})
tmp.update(cnt_catched_event=0, cnt_missed_event=0)
stats.update({vmid: tmp})
is_critical_win = lambda t, ts: np.logical_and(ts< t-wsize_lower_h,
ts>=t-wsize_upper_h)
is_maintenance_win = lambda t, ts: np.logical_and(ts>t,
ts<=t+t_postevent)
is_uncritical_win = lambda t, ts, mode: ts<t-wsize_upper_h if mode=='before' else ts>t+t_postevent
is_win_pos_alarm = lambda t, ts: np.logical_and(ts> t+wsize_lower_h,
ts<=t+wsize_upper_h)
t_start = time()
for n, pid in enumerate(pids):
ff = [x for x in listdir(data_path) if 'fmat_%d_'%int(f[:-3].split('_')[1]) in x][0]
df = pd.read_hdf(join(data_path, ff),'reduced', where='PatientID=%d'%pid)[['Datetime']+lst_vmid+['pm41','pm42','pm43','pm44','pm87']]
ff = [x for x in listdir(ep_path) if 'endpoints_%d_'%int(f[:-3].split('_')[1]) in x][0]
df_ep = pd.read_hdf(join(ep_path, ff), where='PatientID=%d'%pid)[['Datetime','endpoint_status']]
df_lbl = pd.read_hdf(join(pred_path, f),'p%d'%pid)[['AbsDatetime','TrueLabel']]
# df.loc[:,'Datetime'] = pd.DatetimeIndex(df.Datetime).round('min').values
# df_ep.loc[:,'Datetime'] = pd.DatetimeIndex(df_ep.Datetime).round('min').values
# df_lbl.loc[:,'AbsDatetime'] = pd.DatetimeIndex(df_lbl.AbsDatetime).round('min').values
total_los = 0
df = df.groupby('Datetime').mean()
df_ep.set_index('Datetime', inplace=True)
df_lbl.set_index('AbsDatetime', inplace=True)
df = df.merge(df_ep, how='outer', left_index=True, right_index=True)
df = df.merge(df_lbl, how='outer', left_index=True, right_index=True)
df.sort_index(inplace=True)
df_ep.loc[:,'Stable'] = (df_ep.endpoint_status=='event 0').astype(int)
df_ep.loc[:,'InEvent'] = df_ep.endpoint_status.isin(['event 1','event 2','event 3']).astype(int)
beg_stable = df_ep.index[np.where(np.array([0]+np.diff(df_ep.Stable.values).tolist())==1)]
end_stable = df_ep.index[np.where(np.array(np.diff(df_ep.Stable.values).tolist())==-1)]
if df_ep.iloc[0].Stable==1:
beg_stable = np.concatenate([[df_ep.index[0]], beg_stable])
if df_ep.iloc[-1].Stable==1:
end_stable = np.concatenate([end_stable, [df_ep.index[-1]]])
assert(len(beg_stable)==len(end_stable))
df.loc[:,'Stable'] = False
for i in range(len(beg_stable)):
df.loc[df.index[np.logical_and(df.index>=beg_stable[i],
df.index<=end_stable[i])],'Stable'] = True
beg_onset = df_ep.index[np.where(np.array([0]+np.diff(df_ep.InEvent).tolist())==1)]
end_onset = df_ep.index[np.where(np.array(np.diff(df_ep.InEvent).tolist())==-1)]
if df_ep.iloc[0].InEvent==1:
beg_onset = np.concatenate([[df_ep.index[0]], beg_onset])
if df_ep.iloc[-1].InEvent==1:
end_onset = np.concatenate([end_onset, [df_ep.index[-1]]])
assert(len(beg_onset)==len(end_onset))
df.loc[:,'InEvent'] = False
for i in range(len(beg_onset)):
df.loc[df.index[np.logical_and(df.index>=beg_onset[i],
df.index<=end_onset[i])],'InEvent'] = True
for col in ['Stable', 'InEvent']:
df.loc[:,col] = df[col].astype(int)
df.loc[:,'Uncertain'] = ((df.Stable+df.InEvent)==0).astype(int)
for pmid in ['pm41','pm42','pm43','pm44','pm87']:
df.loc[:,pmid] = df[pmid].fillna(method='ffill').fillna(0)
df['OnDrug'] = (df[['pm41','pm42','pm43','pm44','pm87']].sum(axis=1)>0).astype(int)
df.loc[:,'Onset'] = False
for i, dt in enumerate(beg_onset):
dt = np.datetime64(dt)
win_pre_event = df[is_critical_win(dt, df.index.values)]
if len(win_pre_event)==0 or win_pre_event.Stable.sum()==0:
continue
df.loc[dt,'Onset'] = True
del df_ep, df_lbl
gc.collect()
dt_unstable = df.index[df.Stable==0]
for col in metaid_oor_mapping.keys():
if metaid_oor_mapping[col][0] > metaid_oor_mapping[col][1]:
if col == 'vm28':
df.loc[:,col+'_Alarm'] = np.logical_or(df[col].values >= metaid_oor_mapping[col][0],
df[col].values < metaid_oor_mapping[col][1])
else:
df.loc[:,col+'_Alarm'] = np.logical_or(df[col].values > metaid_oor_mapping[col][0],
df[col].values < metaid_oor_mapping[col][1])
else:
df.loc[:,col+'_Alarm'] = np.logical_and(df[col].values > metaid_oor_mapping[col][0],
df[col].values < metaid_oor_mapping[col][1])
if len(dt_unstable) > 0:
df.loc[dt_unstable, col+'_Alarm'] = np.nan
for dt in df.index[np.abs(df[col+'_Alarm'])==1]:
dt = np.datetime64(dt)
win_pos_alarm = df[is_win_pos_alarm(dt, df.index.values)]
if win_pos_alarm.InEvent.sum() > 0:
df.loc[dt, col+'_Alarm'] = +1
elif win_pos_alarm.Uncertain.sum() == len(win_pos_alarm):
df.loc[dt, col+'_Alarm'] = 0
else:
df.loc[dt, col+'_Alarm'] = -1
df['any_Alarm'] = np.abs(df[[col for col in df.columns if 'Alarm' in col]]).sum(axis=1)>0
if len(dt_unstable) > 0:
df.loc[dt_unstable,'any_Alarm'] = np.nan
for dt in df.index[np.abs(df.any_Alarm)==1]:
dt = np.datetime64(dt)
win_pos_alarm = df[is_win_pos_alarm(dt, df.index.values)]
if win_pos_alarm.InEvent.sum() > 0:
df.loc[dt,'any_Alarm'] = 1
elif win_pos_alarm.Uncertain.sum() == len(win_pos_alarm):
df.loc[dt,'any_Alarm'] = 0
else:
df.loc[dt,'any_Alarm'] = -1
for vmid in lst_vmid+['any']:
df.loc[:,vmid+'_CatchedOnset'] = False
for i, dt in enumerate(df.index[df.Onset]):
dt = np.datetime64(dt)
win_pre_event = df[is_critical_win(dt, df.index.values)]
for vmid in lst_vmid+['any']:
df.loc[dt,vmid+'_CatchedOnset'] = win_pre_event[vmid+'_Alarm'].abs().sum()>0
if df.InEvent.sum()==0:
# assert('Yes' not in df.IsAlarmTrue.unique())
tdiff = np.array([0]+(np.diff(df.index.values)/np.timedelta64(1,'h')).tolist())
tdiff_stable = tdiff[df.Stable==1]
los_h = np.sum(tdiff)
los_stable_h = np.sum(tdiff_stable)
for vmid in lst_vmid+['any']:
stats[vmid]['patients_wo_events']['valid_los'].append( los_stable_h )
stats[vmid]['patients_wo_events']['los'].append( los_h )
stats[vmid]['patients_wo_events']['cnt_alarm'].append( df[vmid+'_Alarm'].abs().sum() )
else:
stable_sum = 0
beg_onset = df.index[np.where(np.array([0]+np.diff(df.InEvent.values).tolist())==1)[0]]
        end_onset = df.index[np.where(np.diff(df.InEvent.values)==-1)[0]]
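        # --- Hedged sketch: the original script is truncated here. Using the
        # --- *_CatchedOnset columns computed above and the stats dict initialised
        # --- at the top, a plausible continuation counts caught vs. missed onsets
        # --- per variable (the window-level los/alarm accumulation is omitted):
        for vmid in lst_vmid + ['any']:
            n_caught = int(df.loc[df.Onset, vmid + '_CatchedOnset'].sum())
            n_missed = int(df.Onset.sum()) - n_caught
            stats[vmid]['cnt_catched_event'] += n_caught
            stats[vmid]['cnt_missed_event'] += n_missed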
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from language.nql import dataset
from language.nql import nql
import numpy as np
import tensorflow as tf
NP_NONE = np.array([0., 0., 0., 0., 0.])
NP_A = np.array([1., 0., 0., 0., 0.])
NP_B = np.array([0., 1., 0., 0., 0.])
NP_C = np.array([0., 0., 1., 0., 0.])
import datetime
import logging
import time
from pathlib import Path
from typing import Dict, List
import nibabel as nib
import numpy as np
import SimpleITK as sitk
from nilearn.image import resample_img
log = logging.getLogger(__name__)
def time_it(func):
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
log.info(func.__name__ + " took " + str(end - start) + "sec")
return result
return wrapper
def resample_nifti(nifti_path, output_path, res=1.0):
"""
Resamples a nifti to an isotropic resolution of res
Args:
nifti_path: path to a NifTI image
output_path: path where to save the resampled NifTI image
"""
assert Path(nifti_path).exists()
nifti = nib.load(nifti_path)
nifti_resampled = resample_img(
nifti, target_affine=np.eye(3) * res, interpolation="nearest"
)
nib.save(nifti_resampled, output_path)
def resample_to_img(img, target_img, interpolation="nearest"):
interpolation_mapper = {
"nearest": sitk.sitkNearestNeighbor,
"linear": sitk.sitkLinear,
"bspline": sitk.sitkBSpline,
"gaussian": sitk.sitkGaussian,
}
try:
sitk_interpolator = interpolation_mapper[interpolation]
    except KeyError:
raise ValueError(f"Interpolation {interpolation} not supported.")
resampled_img = sitk.Resample(
img,
target_img,
sitk.Transform(),
sitk_interpolator,
0,
img.GetPixelID(),
)
return resampled_img
def resample_to_nifti(
nifti_path, ref_path, output_path=None, interpolation="nearest"
):
"""
    Resamples a NifTI image onto a reference NifTI grid using the SimpleITK-based
    resample_to_img helper above. If output_path is not given, nifti_path is overwritten.
"""
nifti_path = str(nifti_path)
ref_path = str(ref_path)
if output_path is None:
output_path = nifti_path
nifti = sitk.ReadImage(nifti_path)
ref_nifti = sitk.ReadImage(ref_path)
nifti_resampled = resample_to_img(
img=nifti, target_img=ref_nifti, interpolation=interpolation
)
sitk.WriteImage(nifti_resampled, output_path)
def combine_nifti_masks(mask1_path, mask2_path, output_path):
"""
Args:
mask1_path: abs path to the first nifti mask
mask2_path: abs path to the second nifti mask
output_path: abs path to saved concatenated mask
"""
if not Path(mask1_path).exists():
raise FileNotFoundError(f"Mask {mask1_path} not found.")
if not Path(mask2_path).exists():
raise FileNotFoundError(f"Mask {mask2_path} not found.")
mask1 = nib.load(mask1_path)
mask2 = nib.load(mask2_path)
matrix1 = mask1.get_fdata()
matrix2 = mask2.get_fdata()
assert matrix1.shape == matrix2.shape
new_matrix = np.zeros(matrix1.shape)
new_matrix[matrix1 == 1] = 1
new_matrix[matrix2 == 1] = 2
new_matrix = new_matrix.astype(int)
new_mask = nib.Nifti1Image(
new_matrix, affine=mask1.affine, header=mask1.header
)
nib.save(new_mask, output_path)
def relabel_mask(mask_path: str, label_map: Dict[int, int], save_path):
"""
Relabel mask with a new label map.
    E.g. for a prostate mask with two labels:
1 for peripheral zone and 2 for transition zone,
relabel_mask(mask_path, {1: 1, 2: 1}) would merge both zones
into label 1.
"""
if not Path(mask_path).exists():
raise FileNotFoundError(f"Mask {mask_path} not found.")
mask = nib.load(mask_path)
matrix = mask.get_fdata()
    n_found_labels = len(np.unique(matrix))
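    # --- Hedged completion sketch: the original function body is cut off above.
    # --- Mirroring combine_nifti_masks, a minimal continuation applies the label
    # --- mapping voxel-wise and saves the result to save_path:
    log.info(f"Found {n_found_labels} distinct labels in {mask_path}")
    new_matrix = np.zeros(matrix.shape, dtype=int)
    for old_label, new_label in label_map.items():
        new_matrix[matrix == old_label] = new_label
    new_mask = nib.Nifti1Image(new_matrix, affine=mask.affine, header=mask.header)
    nib.save(new_mask, save_path)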
import os
import h5py
import numpy as np
import dataloaders.nyu_transforms as transforms
from torch.utils.data import Dataset
# NYU and Kitti_odo
import dataloaders.nyu_transforms as nyu_transforms
from dataloaders.pert_nyu import ShiftDepth
iheight, iwidth = 480, 640 # raw image size
to_tensor = nyu_transforms.ToTensor()
IMG_EXTENSIONS = ['.h5',]
def h5_loader(path):
    # use a context manager so the HDF5 file handle is released after reading
    with h5py.File(path, "r") as h5f:
        rgb = np.array(h5f['rgb'])
        rgb = np.transpose(rgb, (1, 2, 0))
        depth = np.array(h5f['depth'])
    return rgb, depth
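# --- Hedged addition: find_classes and make_dataset are used by MyDataloader
# --- below but are not defined in this excerpt. Typical torchvision-style
# --- helpers (a sketch under that assumption, not the original code):
def find_classes(dir):
    classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
    classes.sort()
    class_to_idx = {classes[i]: i for i in range(len(classes))}
    return classes, class_to_idx

def make_dataset(dir, class_to_idx):
    images = []
    dir = os.path.expanduser(dir)
    for target in sorted(os.listdir(dir)):
        d = os.path.join(dir, target)
        if not os.path.isdir(d):
            continue
        for root, _, fnames in sorted(os.walk(d)):
            for fname in sorted(fnames):
                if any(fname.endswith(ext) for ext in IMG_EXTENSIONS):
                    path = os.path.join(root, fname)
                    images.append((path, class_to_idx[target]))
    return images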
class MyDataloader(Dataset):
modality_names = ['rgb', 'rgbd', 'd'] # , 'g', 'gd'
color_jitter = nyu_transforms.ColorJitter(0.4, 0.4, 0.4)
def __init__(self, root, type, sparsifier=None, modality='rgb', shift=None, rotate=None, loader=h5_loader):
classes, class_to_idx = find_classes(root)
imgs = make_dataset(root, class_to_idx)
assert len(imgs)>0, "Found 0 images in subfolders of: " + root + "\n"
self.root = root
self.imgs = imgs
self.classes = classes
self.shift = shift
self.rotate = rotate
self.class_to_idx = class_to_idx
if type == 'train':
self.transform = self.train_transform
elif type == 'selval':
self.transform = self.val_transform
else:
raise (RuntimeError("Invalid dataset type: " + type + "\n"
"Supported dataset types are: train, selval"))
self.loader = loader
self.sparsifier = sparsifier
assert (modality in self.modality_names), "Invalid modality type: " + modality + "\n" + \
"Supported dataset types are: " + ''.join(self.modality_names)
self.modality = modality
def train_transform(self, rgb, depth):
raise (RuntimeError("train_transform() is not implemented. "))
    def val_transform(self, rgb, depth):
raise (RuntimeError("val_transform() is not implemented."))
def create_sparse_depth(self, rgb, depth):
if self.sparsifier is None:
return depth
else:
mask_keep = self.sparsifier.dense_to_sparse(rgb, depth)
sparse_depth = np.zeros(depth.shape)
sparse_depth[mask_keep] = depth[mask_keep]
return sparse_depth
def create_rgbd(self, rgb, depth):
sparse_depth = self.create_sparse_depth(rgb, depth)
rgbd = np.append(rgb, np.expand_dims(sparse_depth, axis=2), axis=2)
return rgbd
def __getraw__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (rgb, depth) the raw data.
"""
path, target = self.imgs[index]
rgb, depth = self.loader(path)
return rgb, depth
def __getitem__(self, index):
rgb, depth = self.__getraw__(index)
# Perturb the dataset
if self.shift is not None:
shift_depth_transform = ShiftDepth(shift=self.shift)
pert_depth = shift_depth_transform(depth)
elif self.rotate is not None:
rotate_depth_transform = ShiftDepth(rotate=self.rotate)
            pert_depth = rotate_depth_transform(depth)
else:
pert_depth = depth
if self.transform is not None:
rgb_np, depth_np = self.transform(rgb, pert_depth)
else:
raise(RuntimeError("transform not defined"))
# color normalization
# rgb_tensor = normalize_rgb(rgb_tensor)
# rgb_np = normalize_np(rgb_np)
if self.modality == 'rgb':
input_np = rgb_np
elif self.modality == 'rgbd':
input_np = self.create_rgbd(rgb_np, pert_depth)
elif self.modality == 'd':
input_np = self.create_sparse_depth(rgb_np, pert_depth)
input_tensor = to_tensor(input_np)
while input_tensor.dim() < 3:
input_tensor = input_tensor.unsqueeze(0)
depth_tensor = to_tensor(depth)
depth_tensor = depth_tensor.unsqueeze(0)
return input_tensor, depth_tensor
def __len__(self):
return len(self.imgs)
class NYUDataset(MyDataloader):
def __init__(self, root, type, sparsifier=None, modality='rgb', shift=None, rotate=None):
super(NYUDataset, self).__init__(root, type, sparsifier, modality, shift, rotate)
self.output_size = (228, 304)
def train_transform(self, rgb, depth):
s = np.random.uniform(1.0, 1.5) # random scaling
depth_np = depth / s
angle = np.random.uniform(-5.0, 5.0) # random rotation degrees
        do_flip = np.random.uniform(0.0, 1.0) < 0.5  # random horizontal flip (0.5 threshold assumed)
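        # --- Hedged completion sketch: the transform pipeline is truncated here.
        # --- Assuming nyu_transforms provides Compose/Resize/Rotate/CenterCrop/
        # --- HorizontalFlip (as in the sparse-to-dense reference implementation):
        transform = nyu_transforms.Compose([
            nyu_transforms.Resize(250.0 / iheight),  # downsample first so rotation is cheap
            nyu_transforms.Rotate(angle),
            nyu_transforms.Resize(s),
            nyu_transforms.CenterCrop(self.output_size),
            nyu_transforms.HorizontalFlip(do_flip),
        ])
        rgb_np = transform(rgb)
        rgb_np = self.color_jitter(rgb_np)  # random color jittering on the RGB image only
        rgb_np = np.asfarray(rgb_np, dtype='float') / 255
        depth_np = transform(depth_np)
        return rgb_np, depth_np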
import nibabel as nib
import numpy as np
import torch
from functools import partial
from collections import defaultdict
from pairwise_measures import PairwiseMeasures
from src.utils import apply_transform, non_geometric_augmentations, generate_affine, to_var_gpu, batch_adaptation, soft_dice
def evaluate(args, preds, targets, prefix,
metrics=['dice', 'jaccard', 'sensitivity', 'specificity', 'soft_dice',
'loads', 'haus_dist', 'vol_diff', 'ppv', 'connected_elements']):
output_dict = defaultdict(list)
nifty_metrics = ['dice', 'jaccard', 'sensitivity', 'specificity',
'haus_dist', 'vol_diff', 'ppv', 'connected_elements']
for pred, target in zip(preds, targets):
seg = np.where(pred > 0.5, np.ones_like(pred, dtype=np.int64), np.zeros_like(pred, dtype=np.int64))
ref = np.where(target > 0.5, np.ones_like(target, dtype=np.int64), np.zeros_like(target, dtype=np.int64))
pairwise = PairwiseMeasures(seg, ref)
for metric in nifty_metrics:
if metric in metrics:
if metric == 'connected_elements':
TPc, FPc, FNc = pairwise.m_dict[metric][0]()
output_dict[prefix + 'TPc'].append(TPc)
output_dict[prefix + 'FPc'].append(FPc)
output_dict[prefix + 'FNc'].append(FNc)
else:
output_dict[prefix + metric].append(pairwise.m_dict[metric][0]())
if 'soft_dice' in metrics:
output_dict[prefix + 'soft_dice'].append(soft_dice(pred, ref, args.labels))
if 'loads' in metrics:
output_dict[prefix + 'loads'].append(np.sum(pred))
if 'per_pixel_diff' in metrics:
output_dict[prefix + 'per_pixel_diff'].append(np.mean(np.abs(ref - pred)))
return output_dict
def inference_tumour(args, p, model, whole_volume_dataset, iteration=0, prefix='', infer_on=None):
"""
This function should run inference on a set of volumes, save the results, calculate the dice
"""
def save_img(format_spec, identifier, array):
img = nib.Nifti1Image(array, np.eye(4))
fn = format_spec.format(identifier)
nib.save(img, fn)
return fn
with torch.set_grad_enabled(False):
model.eval()
preds_0, preds_ema = [], []
preds, targets = [], []
predsAug, predsT = [], []
range_of_volumes = range(len(whole_volume_dataset)) if infer_on is None else infer_on
print('Evaluating on {} subjects'.format(len(range_of_volumes)))
for index in range(len(range_of_volumes)):
print('Evaluating on subject {}'.format(str(index)))
inputs, labels = whole_volume_dataset[index]
#TODO: inputs is of size (4, 170, 240, 160), need to change inference values accordingly.
subj_id = whole_volume_dataset.get_subject_id_from_index(index)
targetL = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
outputS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
inputsS = np.zeros(shape=(inputs.shape[0], args.paddtarget, args.paddtarget, inputs.shape[-1]))
outputsT = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
outputsAug = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
for slice_index in np.arange(0, inputs.shape[-1], step=args.batch_size):
index_start = slice_index
index_end = min(slice_index+args.batch_size, inputs.shape[-1])
batch_input = np.einsum('ijkl->lijk', inputs[:, :, :, index_start:index_end])
batch_labels = np.einsum('ijk->kij', labels[:, :, index_start:index_end])
batch_input = torch.tensor(batch_input)
batch_labels = torch.tensor(np.expand_dims(batch_labels, axis=1))
batch_input, batch_labels = batch_adaptation(batch_input, batch_labels, args.paddtarget)
batch_input, batch_labels = to_var_gpu(batch_input), to_var_gpu(batch_labels)
outputs, _, _, _, _, _, _, _, _, _ = model(batch_input)
outputs = torch.sigmoid(outputs)
if args.method == 'A2':
Theta, Theta_inv = generate_affine(batch_input, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(batch_input, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
elif args.method == 'A4':
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
elif args.method in ['A3', 'adversarial', 'mean_teacher']:
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputstaug, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(inputstaug, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
outputS[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(outputs.detach().cpu().numpy()))
targetL[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(batch_labels.detach().cpu().numpy()))
inputsS[:, :, :, index_start:index_end] = np.einsum('ijkl->jkli', np.squeeze(batch_input.detach().cpu().numpy()))
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
outputsAug[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(outputstaug.detach().cpu().numpy()))
if args.method in ['A3', 'A2', 'adversarial', 'mean_teacher']:
outputsT[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(outputs_t.detach().cpu().numpy()))
format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source, args.target, args.tag,
iteration) + \
'_{}_' + f'{str(subj_id)}.nii.gz'
ema_format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source,
args.target, args.tag, 'EMA') + \
'_{}_' + f'{str(subj_id)}.nii.gz'
if iteration == 0:
fn = save_img(format_spec=ema_format_spec, identifier='Prediction', array=outputS)
else:
pred_zero = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_0__Prediction_{str(subj_id)}.nii.gz'
outputs_0 = nib.load(pred_zero).get_data()
preds_0.append(outputs_0)
alpha = 0.9
pred_ema_filename = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_EMA__Prediction_{str(subj_id)}.nii.gz'
pred_ema_t_minus_one = nib.load(pred_ema_filename).get_data()
pred_ema = alpha * outputS + (1 - alpha) * pred_ema_t_minus_one
preds_ema.append(pred_ema)
save_img(format_spec=ema_format_spec, identifier='Prediction', array=pred_ema)
save_img(format_spec=format_spec, identifier='Prediction', array=outputS)
save_img(format_spec=format_spec, identifier='target', array=targetL)
for idx, modality in enumerate(['flair', 't1c', 't1', 't2']):
save_img(format_spec=format_spec, identifier='{}_mri'.format(modality), array=inputsS[idx, ...])
preds.append(outputS)
targets.append(targetL)
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
predsAug.append(outputsAug)
save_img(format_spec=format_spec, identifier='Aug', array=outputsAug)
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
predsT.append(outputsT)
save_img(format_spec=format_spec, identifier='Transformed', array=outputsT)
performance_supervised = evaluate(args=args, preds=preds, targets=targets, prefix='supervised_')
performance_i = None
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
performance_i = evaluate(args=args, preds=predsAug, targets=predsT, prefix='consistency_')
else:
performance_i = evaluate(args=args, preds=predsAug, targets=preds, prefix='consistency_')
if iteration == 0:
return performance_supervised, performance_i, None, None
else:
performance_compared_to_0 = evaluate(args=args, preds=preds, targets=preds_0, prefix='diff_to_0_',
metrics=['per_pixel_diff'])
performance_compared_to_ema = evaluate(args=args, preds=preds, targets=preds_ema, prefix='diff_to_ema_',
metrics=['per_pixel_diff'])
return performance_supervised, performance_i, performance_compared_to_0, performance_compared_to_ema
def inference_ms(args, p, model, whole_volume_dataset, iteration=0, prefix='', infer_on=None, eval_diff=True):
"""
This function should run inference on a set of volumes, save the results, calculate the dice
"""
def save_img(format_spec, identifier, array):
img = nib.Nifti1Image(array, np.eye(4))
fn = format_spec.format(identifier)
nib.save(img, fn)
return fn
with torch.set_grad_enabled(False):
model.eval()
preds_0, preds_ema = [], []
preds, targets = [], []
predsAug, predsT = [], []
print('Evaluating on {} subjects'.format(len(whole_volume_dataset)))
range_of_volumes = range(len(whole_volume_dataset)) if infer_on is None else infer_on
for index in range_of_volumes:
print('Evaluating on subject {}'.format(str(index)))
inputs, labels = whole_volume_dataset[index]
subj_id = whole_volume_dataset.get_subject_id_from_index(index)
targetL = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
inputsS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputsT = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputsAug = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
for slice_index in np.arange(0, inputs.shape[2], step=args.batch_size):
index_start = slice_index
index_end = min(slice_index+args.batch_size, inputs.shape[2])
batch_input = np.einsum('ijk->kij', inputs[:, :, index_start:index_end])
batch_labels = np.einsum('ijk->kij', labels[:, :, index_start:index_end])
batch_input = torch.tensor(np.expand_dims(batch_input, axis=1).astype(np.float32))
batch_labels = torch.tensor(np.expand_dims(batch_labels, axis=1))
batch_input, batch_labels = batch_adaptation(batch_input, batch_labels, args.paddtarget)
batch_input, batch_labels = to_var_gpu(batch_input), to_var_gpu(batch_labels)
outputs, _, _, _, _, _, _, _, _, _ = model(batch_input)
outputs = torch.sigmoid(outputs)
if args.method == 'A2':
Theta, Theta_inv = generate_affine(batch_input, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(batch_input, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
elif args.method == 'A4':
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
elif args.method in ['A3', 'adversarial', 'mean_teacher']:
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputstaug, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(inputstaug, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
outputS[:, :, index_start:index_end] = np.einsum('ijk->jki', outputs.detach().cpu().numpy()[:, 0, ...])
targetL[:, :, index_start:index_end] = np.einsum('ijk->jki', batch_labels.detach().cpu().numpy()[:, 0, ...])
inputsS[:, :, index_start:index_end] = np.einsum('ijk->jki', batch_input.detach().cpu().numpy()[:, 0, ...])
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
outputsAug[:, :, index_start:index_end] = np.einsum('ijk->jki',
outputstaug.detach().cpu().numpy()[:, 0, ...])
if args.method in ['A3', 'A2', 'adversarial', 'mean_teacher']:
outputsT[:, :, index_start:index_end] = np.einsum('ijk->jki',
outputs_t.detach().cpu().numpy()[:, 0, ...])
format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source, args.target, args.tag, iteration) +\
'_{}_' + f'{str(subj_id)}.nii.gz'
ema_format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source,
args.target, args.tag, 'EMA') + \
'_{}_' + f'{str(subj_id)}.nii.gz'
if iteration == 0:
save_img(format_spec=ema_format_spec, identifier='Prediction', array=outputS)
elif eval_diff and iteration > 0:
pred_zero = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_{0}__Prediction_{str(subj_id)}.nii.gz'
outputs_0 = nib.load(pred_zero).get_data()
preds_0.append(outputs_0)
alpha = 0.9
pred_ema_filename = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_EMA__Prediction_{str(subj_id)}.nii.gz'
print(pred_ema_filename)
pred_ema_t_minus_one = nib.load(pred_ema_filename).get_data()
pred_ema = alpha * outputS + (1 - alpha) * pred_ema_t_minus_one
preds_ema.append(pred_ema)
save_img(format_spec=ema_format_spec, identifier='Prediction', array=pred_ema)
else:
print('Not computing diff')
save_img(format_spec=format_spec, identifier='Prediction', array=outputS)
save_img(format_spec=format_spec, identifier='target', array=targetL)
save_img(format_spec=format_spec, identifier='mri', array=inputsS)
preds.append(outputS)
targets.append(targetL)
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
predsAug.append(outputsAug)
save_img(format_spec=format_spec, identifier='Aug', array=outputsAug)
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
predsT.append(outputsT)
save_img(format_spec=format_spec, identifier='Transformed', array=outputsT)
performance_supervised = evaluate(args=args, preds=preds, targets=targets, prefix='supervised_')
performance_i = None
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
performance_i = evaluate(args=args, preds=predsAug, targets=predsT, prefix='consistency_')
else:
performance_i = evaluate(args=args, preds=predsAug, targets=preds, prefix='consistency_')
if iteration == 0:
return performance_supervised, performance_i, None, None
else:
performance_compared_to_0 = evaluate(args=args, preds=preds, targets=preds_0, prefix='diff_to_0_',
metrics=['per_pixel_diff'])
performance_compared_to_ema = evaluate(args=args, preds=preds, targets=preds_ema, prefix='diff_to_ema_',
metrics=['per_pixel_diff'])
return performance_supervised, performance_i, performance_compared_to_0, performance_compared_to_ema
def inference_crossmoda(args, p, model, whole_volume_dataset, iteration=0, prefix='', infer_on=None, eval_diff=True):
"""
This function should run inference on a set of volumes, save the results, calculate the dice
"""
def save_img(format_spec, identifier, array):
img = nib.Nifti1Image(array, np.eye(4))
fn = format_spec.format(identifier)
nib.save(img, fn)
return fn
with torch.set_grad_enabled(False):
model.eval()
preds_0, preds_ema = [], []
preds, targets = [], []
predsAug, predsT = [], []
print('Evaluating on {} subjects'.format(len(whole_volume_dataset)))
range_of_volumes = range(len(whole_volume_dataset)) if infer_on is None else infer_on
for index in range_of_volumes:
print('Evaluating on subject {}'.format(str(index)))
inputs, labels = whole_volume_dataset[index]
subj_id = whole_volume_dataset.get_subject_id_from_index(index)
targetL = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
inputsS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputsT = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
            outputsAug = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
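            # --- Hedged sketch: the original function is truncated here. The visible
            # --- part mirrors inference_ms above, so the remaining body presumably
            # --- loops over slices in the same way, e.g.:
            for slice_index in np.arange(0, inputs.shape[2], step=args.batch_size):
                index_start = slice_index
                index_end = min(slice_index + args.batch_size, inputs.shape[2])
                batch_input = np.einsum('ijk->kij', inputs[:, :, index_start:index_end])
                batch_labels = np.einsum('ijk->kij', labels[:, :, index_start:index_end])
                batch_input = torch.tensor(np.expand_dims(batch_input, axis=1).astype(np.float32))
                batch_labels = torch.tensor(np.expand_dims(batch_labels, axis=1))
                batch_input, batch_labels = batch_adaptation(batch_input, batch_labels, args.paddtarget)
                batch_input, batch_labels = to_var_gpu(batch_input), to_var_gpu(batch_labels)
                outputs, _, _, _, _, _, _, _, _, _ = model(batch_input)
                outputs = torch.sigmoid(outputs)
                outputS[:, :, index_start:index_end] = np.einsum('ijk->jki', outputs.detach().cpu().numpy()[:, 0, ...])
                targetL[:, :, index_start:index_end] = np.einsum('ijk->jki', batch_labels.detach().cpu().numpy()[:, 0, ...])
                inputsS[:, :, index_start:index_end] = np.einsum('ijk->jki', batch_input.detach().cpu().numpy()[:, 0, ...])
            # (the augmentation/consistency branches, nifti saving and the final
            #  evaluate() calls would follow as in inference_ms above; omitted here)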
# -*- coding: utf-8 -*-
###############################################################################
###############################################################################
import logging
import numpy as np
from skimage import io
# create logger
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
###############################################################################
###############################################################################
# global params
fld = 'data/'
satellite_images = ['20090526', '20110514', '20120524', '20130608',
'20140517', '20150507', '20160526']
train_images = satellite_images[:-1]
alt = 'DEM_altitude.tif'
slp = 'DEM_slope.tif'
def load_satellite_img(path, date, normalize=True):
img = io.imread(path + date + ".tif").astype(np.float32)
ndvi = io.imread(path + date + "_NDVI.tif").astype(np.float32)[..., None]
if normalize:
img /= 20000.0
ndvi /= 255.0 # TODO ask paul: too high ?
return img, ndvi
def load_satellite_mask(path: str, date: str):
return io.imread(path + date + "_mask_ls.tif").astype(np.bool)
def load_static_data(path: str, normalize: bool = True):
altitude = io.imread(path + alt).astype(np.float32)[..., None]
slope = io.imread(path + slp).astype(np.float32)[..., None]
if normalize:
altitude /= 2555.0
slope /= 52.0
return altitude, slope
def load_image_eval(path):
altitude, slope = load_static_data(path)
img1 = get_single_satellite_features(path, satellite_images[-1])
img2 = get_single_satellite_features(path, satellite_images[-2])
return np.concatenate((img1, img2, altitude, slope), 2)
def get_single_satellite_features(path, date):
sat_image, ndvi = load_satellite_img(path, date)
return np.concatenate((sat_image, ndvi), axis=2)
def extract_patch(data, x, y, size):
"""Expects a 3 dimensional image (height,width,channels)"""
diff = size // 2
patch = data[x - diff:x + diff + 1, y - diff:y + diff + 1, :]
return patch
def patch_validator(shape, pos, size):
if ((pos[0] < size) or
(pos[1] < size) or
(shape[0] - pos[0] < size) or
(shape[1] - pos[1] < size)):
return False
return True
def compute_coordinates(masks):
"""Expects a list of image masks and computes two sets of coordinates, one for positive events and one for
negatives """
positives, negatives = [], []
for year, mask in enumerate(masks):
logger.info(" process mask {}".format(year))
# positive samples
x_pos, y_pos = np.where(mask == 1)
d_pos = np.zeros_like(x_pos) + year
positive = np.stack((d_pos, x_pos, y_pos)).T
positives.append(positive)
# negative samples
x_neg, y_neg = np.where(mask == 0)
d_neg = np.zeros_like(x_neg) + year
negative = np.stack((d_neg, x_neg, y_neg)).T
negatives.append(negative)
# put everything together
logger.info("concatenate coordinates")
positives = np.concatenate(positives)
negatives = np.concatenate(negatives)
return positives, negatives
def load_sat_images(path):
sat_images = []
for sat_image, ndvi in (load_satellite_img(path, d) for d in train_images):
sat_images.append(np.concatenate((sat_image, ndvi), axis=2))
return np.stack(sat_images, axis=0)
def make_small_dataset(path):
"""Computes full dataset"""
logger.info("load landslides and masks")
sat_images = load_sat_images(path)
logger.info("calculate coordinates per mask")
masks = list(load_satellite_mask(path, d) for d in train_images)
positives, negatives = compute_coordinates(masks)
altitude, slope = load_static_data(path)
return sat_images, positives, negatives, altitude, slope
def index_generator(data, validator, image_size, size, batch_size):
batch = np.empty((batch_size, 3), dtype=np.int32)
ctr = 0
while True:
indices = np.random.permutation(len(data))
for i in indices:
if validator(image_size, data[i][1:], size):
batch[ctr] = data[i]
ctr += 1
if ctr == batch_size:
yield batch
ctr = 0
def patch_generator(images, pos, neg, altitude, slope, size=25, batch_size=64, p=0.4):
# calculate the batch size per label
batch_size_pos = max(1, int(batch_size * p))
batch_size_neg = batch_size - batch_size_pos
image_size = images.shape[1:]
# init index generators
idx_pos = index_generator(pos, patch_validator, image_size, size, batch_size_pos)
idx_neg = index_generator(neg, patch_validator, image_size, size, batch_size_neg)
for sample_idx_pos, sample_idx_neg in zip(idx_pos, idx_neg):
X = []
for year, x, y in sample_idx_pos:
patch_1 = extract_patch(images[year], x, y, size)
if year == 0:
patch_2 = patch_1
else:
patch_2 = extract_patch(images[year - 1], x, y, size)
patch_atl = extract_patch(altitude, x, y, size)
patch_slp = extract_patch(slope, x, y, size)
X.append(np.concatenate((patch_1, patch_2, patch_atl, patch_slp), axis=2))
for year, x, y in sample_idx_neg:
patch_1 = extract_patch(images[year], x, y, size)
if year == 0:
patch_2 = patch_1
else:
patch_2 = extract_patch(images[year - 1], x, y, size)
patch_atl = extract_patch(altitude, x, y, size)
patch_slp = extract_patch(slope, x, y, size)
X.append( | np.concatenate((patch_1, patch_2, patch_atl, patch_slp), axis=2) | numpy.concatenate |
import sys
import os
import warnings
import itertools
import subprocess
import numpy as np
import pandas as pd
import slack
import scipy.stats as st
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
from matplotlib.gridspec import GridSpec
exec(open(os.path.abspath(os.path.join(
os.path.dirname(__file__), os.path.pardir, 'visualisation', 'light_mode.py'))).read())
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from rotvel_correlation.simstats import Simstats
warnings.filterwarnings("ignore")
pathSave = '/cosma6/data/dp004/dc-alta2/C-Eagle-analysis-work/rotvel_correlation'
def bayesian_blocks(t):
"""Bayesian Blocks Implementation
By <NAME>. License: BSD
Based on algorithm outlined in http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
Parameters
----------
t : ndarray, length N
data to be histogrammed
Returns
-------
bins : ndarray
array containing the (N+1) bin edges
Notes
-----
This is an incomplete implementation: it may fail for some
datasets. Alternate fitness functions and prior forms can
be found in the paper listed above.
"""
# copy and sort the array
t = np.sort(t)
N = t.size
# create length-(N + 1) array of cell edges
edges = np.concatenate([t[:1],
0.5 * (t[1:] + t[:-1]),
t[-1:]])
block_length = t[-1] - edges
# arrays needed for the iteration
nn_vec = np.ones(N)
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
#-----------------------------------------------------------------
# Start with first data cell; add one cell at each iteration
#-----------------------------------------------------------------
for K in range(N):
# Compute the width and count of the final bin for all possible
# locations of the K^th changepoint
width = block_length[:K + 1] - block_length[K + 1]
count_vec = np.cumsum(nn_vec[:K + 1][::-1])[::-1]
# evaluate fitness function for these possibilities
fit_vec = count_vec * (np.log(count_vec) - np.log(width))
fit_vec -= 4 # 4 comes from the prior on the number of changepoints
fit_vec[1:] += best[:K]
# find the max of the fitness: this is the K^th changepoint
i_max = np.argmax(fit_vec)
last[K] = i_max
best[K] = fit_vec[i_max]
#-----------------------------------------------------------------
# Recover changepoints by iteratively peeling off the last block
#-----------------------------------------------------------------
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
return edges[change_points]
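# Hedged usage sketch (the random sample is illustrative only): the edges returned
# by bayesian_blocks can be fed straight into np.histogram to get adaptive bins.
def _bayesian_blocks_example():
    rng = np.random.default_rng(0)
    sample = np.concatenate([rng.normal(0.0, 1.0, 500), rng.normal(5.0, 0.5, 200)])
    edges = bayesian_blocks(sample)
    counts, _ = np.histogram(sample, bins=edges)
    return edges, counts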
def freedman_diaconis(x: np.ndarray) -> np.ndarray:
"""
The binwidth is proportional to the interquartile range (IQR) and inversely proportional to the cube root of x.size.
Can be too conservative for small datasets, but is quite good for large datasets. The IQR is very robust to
outliers.
:param x: np.ndarray
The 1-dimensional x-data to bin.
:return: np.ndarray
The bins edges computed using the FD method.
"""
return np.histogram_bin_edges(x, bins='fd')
def equal_number_FD(x: np.ndarray) -> np.ndarray:
"""
Takes the number of bins computed using the FD method, but then selects the bin edges splitting
the dataset in bins with equal number of data-points.
:param x: np.ndarray
The 1-dimensional x-data to bin.
:return: np.ndarray
The bins edges computed using the equal-N method.
"""
nbin = len(np.histogram_bin_edges(x, bins='fd')) - 1
npt = len(x)
return np.interp(np.linspace(0, npt, nbin + 1),
np.arange(npt),
np.sort(x))
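# Hedged comparison sketch (x is any 1-D array): all three binning helpers above
# return bin edges, so they can be swapped freely when histogramming a quantity.
def _binning_comparison_example(x: np.ndarray):
    return {
        'freedman_diaconis': freedman_diaconis(x),
        'equal_number_FD': equal_number_FD(x),
        'bayesian_blocks': bayesian_blocks(x),
    }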
# Print some overall stats about the datasets
sys.stdout = open(os.devnull, 'w')
read_apertures = [Simstats(simulation_name='macsis', aperture_id=i).read_simstats() for i in range(20)]
sys.stdout = sys.__stdout__
for apid, stat in enumerate(read_apertures):
print(f"Aperture radius {apid} \t --> \t {stat['R_aperture'][0]/stat['R_200_crit'][0]:1.2f} R_200_crit")
del read_apertures
sys.stdout = open(os.devnull, 'w')
read_redshifts = [Simstats(simulation_name=i, aperture_id=0).read_simstats() for i in ['macsis', 'celr_e']]
sys.stdout = sys.__stdout__
for sim_name, stat in zip(['macsis', 'celr_e'], read_redshifts):
print('\n')
for zid, redshift in enumerate(stat.query('cluster_id == 0')['redshift_float']):
print(f"Simulation: {sim_name:<10s} Redshift {zid:2d} --> {redshift:1.2f}")
del read_redshifts
# Start with one single aperture
aperture_id = 9
simstats = list()
simstats.append(Simstats(simulation_name='macsis', aperture_id=aperture_id))
simstats.append(Simstats(simulation_name='celr_e', aperture_id=aperture_id))
simstats.append(Simstats(simulation_name='celr_b', aperture_id=aperture_id))
stats_out = [sim.read_simstats() for sim in simstats]
attrs = [sim.read_metadata() for sim in simstats]
print(f"\n{' stats_out DATASET INFO ':-^50s}")
print(stats_out[0].info())
# Create SQL query
query_COLLECTIVE = list()
query_COLLECTIVE.append('redshift_float < 0.02')
query_COLLECTIVE.append('M_200_crit > 10**9')
query_COLLECTIVE.append('thermodynamic_merging_index_T < 1')
stats_filtered = [stat.query(' and '.join(query_COLLECTIVE)) for stat in stats_out]
# Generate plots catalog
x_labels = ['redshift_float', 'R_500_crit', 'R_aperture', 'M_2500_crit', 'M_aperture_T',
'peculiar_velocity_T_magnitude', 'angular_momentum_T_magnitude',
'dynamical_merging_index_T', 'thermodynamic_merging_index_T',
'substructure_fraction_T']
y_labels = ['M_200_crit','rotTvelT','rot0rot4','rot1rot4','dynamical_merging_index_T',
'thermodynamic_merging_index_T','substructure_fraction_T']
data_entries = list(itertools.product(x_labels, y_labels))
x_labels = []
y_labels = []
for entry in data_entries:
if entry[0] != entry[1]:
x_labels.append(entry[0])
y_labels.append(entry[1])
xscale = []
yscale = []
for x in x_labels:
scale = 'log' if 'M' in x or 'velocity' in x else 'linear'
xscale.append(scale)
for y in y_labels:
scale = 'log' if 'M' in y or 'velocity' in y else 'linear'
yscale.append(scale)
data_summary = {
'x' : x_labels,
'y' : y_labels,
'xscale' : xscale,
'yscale' : yscale,
}
summary = pd.DataFrame(data=data_summary, columns=data_summary.keys())
summary = summary[summary['y'].str.contains('rot')]
summary = summary[~summary['x'].str.contains('redshift')]
print(f"\n{' summary DATASET PLOTS INFO ':-^40s}\n", summary)
# Activate the plot factory
print(f"\n{' RUNNING PLOT FACTORY ':-^50s}")
data_entries = summary.to_dict('records')
x_binning = bayesian_blocks
print(f"[+] Binning method for x_data set to `{x_binning.__name__}`.")
for entry_index, data_entry in enumerate(data_entries):
filename = f"{data_entry['x'].replace('_', '')}_{data_entry['y'].replace('_', '')}_aperture{aperture_id}.pdf"
are_files = [os.path.isfile(os.path.join(pathSave, 'scatter', filename)),
os.path.isfile(os.path.join(pathSave, 'kdeplot', filename)),
os.path.isfile(os.path.join(pathSave, 'median', filename))]
#if any(are_files): continue
fig = plt.figure(figsize=(15, 10))
gs = GridSpec(2, 3, figure=fig)
gs.update(wspace=0., hspace=0.)
info_ax0 = fig.add_subplot(gs[0]); info_ax0.axis('off')
ax1 = fig.add_subplot(gs[1])
info_ax1 = fig.add_subplot(gs[2]); info_ax1.axis('off')
ax2 = fig.add_subplot(gs[3], sharex=ax1, sharey=ax1)
ax3 = fig.add_subplot(gs[4], sharex=ax2, sharey=ax2)
ax4 = fig.add_subplot(gs[5], sharex=ax3, sharey=ax3)
ax = [ax1, ax2, ax3, ax4]
plt.setp(ax[0].get_xticklabels(), visible=False)
plt.setp(ax[2].get_yticklabels(), visible=False)
plt.setp(ax[3].get_yticklabels(), visible=False)
xlims = [np.min(pd.concat(stats_filtered)[data_entry['x']]), np.max(pd.concat(stats_filtered)[data_entry['x']])]
ylims = [np.min(pd.concat(stats_filtered)[data_entry['y']]), np.max(pd.concat(stats_filtered)[data_entry['y']])]
# Unresolved issue with the Latex labels
# Some contain an extra `$` at the end of the string, which should not be there.
label_x = attrs[0]['Columns/labels'][data_entry['x']]
label_y = attrs[0]['Columns/labels'][data_entry['y']]
if label_x.endswith('$'): label_x = label_x.rstrip('$')
if label_y.endswith('$'): label_y = label_y.rstrip('$')
ax[0].set_ylabel(label_y)
ax[1].set_ylabel(label_y)
ax[1].set_xlabel(label_x)
ax[2].set_xlabel(label_x)
ax[3].set_xlabel(label_x)
simstats_palette = ['#1B9E77','#D95F02','#7570B3','#E7298A']
z_range = [np.min(pd.concat(stats_filtered)['redshift_float']),
np.max(pd.concat(stats_filtered)['redshift_float'])]
z_range_str = f'{z_range[0]:1.2f} - {z_range[1]:1.2f}' if round(z_range[0]) < round(z_range[1]) else f'{z_range[0]:1.2f}'
items_labels = [
f"{label_x.split(r'quad')[0]} -\\ {label_y.split(r'quad')[0]}",
f"Number of clusters: {np.sum([attr['Number of clusters'] for attr in attrs]):d}",
f"$z$ = {z_range_str:s}",
f"Aperture radius = {stats_filtered[0]['R_aperture'][0] / stats_filtered[0]['R_200_crit'][0]:2.2f} $R_{{200\\ true}}$"
]
info_ax0.text(0.03, 0.97, '\n'.join(items_labels), horizontalalignment='left', verticalalignment='top', size=15, transform=info_ax0.transAxes)
axisinfo_kwargs = dict(
horizontalalignment='right',
verticalalignment='top',
size=15
)
handles = [Patch(facecolor=simstats_palette[i], label=attrs[i]['Simulation'], edgecolor='k', linewidth=1) for i in range(len(attrs))]
leg = info_ax1.legend(handles=handles, loc='lower right', handlelength=1, fontsize=20)
info_ax1.add_artist(leg)
##################################################################################################
# SCATTER PLOTS #
##################################################################################################
plot_type = 'scatterplot'
for ax_idx, axes in enumerate(ax):
axes.set_xscale(data_entry['xscale'])
axes.set_yscale(data_entry['yscale'])
axes.tick_params(direction='in', length=5, top=True, right=True)
if ax_idx == 0:
axes.scatter(
pd.concat(stats_filtered)[data_entry['x']],
pd.concat(stats_filtered)[data_entry['y']],
s=5,
c=simstats_palette[ax_idx-1]
)
axes.text(0.95, 0.95, f'\\textsc{{Total}}', transform=axes.transAxes, **axisinfo_kwargs)
else:
axes.scatter(
stats_filtered[ax_idx-1][data_entry['x']],
stats_filtered[ax_idx-1][data_entry['y']],
s=5,
c=simstats_palette[ax_idx-1]
)
axes.text(0.95, 0.95, f"\\textsc{{{attrs[ax_idx-1]['Simulation']}}}", transform=axes.transAxes, **axisinfo_kwargs)
if not os.path.exists(os.path.join(pathSave, plot_type)):
os.makedirs(os.path.join(pathSave, plot_type))
plt.savefig(os.path.join(pathSave, plot_type, filename))
print(f"[+] Plot {entry_index:3d}/{len(data_entries)} Figure saved: {plot_type:>15s} >> {filename}")
##################################################################################################
# kde PLOTS #
##################################################################################################
plot_type = 'kdeplot'
fig_kde = fig
ax_kde = [fig_kde.axes[i] for i in [1, 3, 4, 5]]
for axes in ax_kde:
for artist in axes.lines + axes.collections:
artist.remove()
x_space = np.linspace(xlims[0], xlims[1], 101)
y_space = np.linspace(ylims[0], ylims[1], 101)
if data_entry['xscale'] == 'log':
x_space = np.linspace(np.log10(xlims[0]), np.log10(xlims[1]), 101)
if data_entry['yscale'] == 'log':
y_space = np.linspace( | np.log10(ylims[0]) | numpy.log10 |
import numpy as np
import xarray as xr
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import geocat.viz.util as gvutil
from geocat.viz import cmaps as gvcmaps
# create an example dataset
da = xr.open_dataset(r'data\noaa_oisst_v2_merged_1982_2020.nc')
sst = da.sst
date = sst.time
ndate = np.arange(len(date))
# define a function to compute the linear trend of a timeseries;
# the slope is scaled by 120 time steps (~ one decade for monthly data)
def linear_trend(x):
    date = x.time
    ndate = np.arange(len(date))
    pf = np.polyfit(ndate, x, 1)
    # we need to return a DataArray or else xarray's groupby won't be happy
    return xr.DataArray(pf[0] * 120)
# stack lat and lon into a single dimension called allpoints
stacked = sst.stack(allpoints=['lat', 'lon'])
# apply the function over allpoints to calculate the trend at each point
trend = stacked.groupby('allpoints').apply(linear_trend)
# unstack back to lat lon coordinates
trend_unstacked = trend.unstack('allpoints')
sst_clim = trend_unstacked
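# Alternative sketch (assumes a reasonably recent xarray that provides
# DataArray.polyfit; the slope units then depend on how the time coordinate is
# encoded, so the factor of 120 used above would need to be re-derived):
#   trend_ds = sst.polyfit(dim='time', deg=1)
#   slope = trend_ds.polyfit_coefficients.sel(degree=1)
# The explicit stack/groupby/unstack route above is kept because it makes the
# per-pixel fit easy to follow.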
sst_clim.lon
## PLOT Figure
# Now plot mean SST climatology
############################################
# Generate figure (set its size (width, height) in inches)
fig = plt.figure(figsize=(7.6, 6.5))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
ax.add_feature(cfeature.LAND,
facecolor="darkgray",
edgecolor='black',
linewidths=1,
zorder=2)
# Usa geocat.viz.util convenience function to set axes parameters
gvutil.set_axes_limits_and_ticks(ax,
ylim=(5,25),
xlim=(80, 100),
xticks=np.arange(80,101 , 5),
yticks=np.arange(5, 26, 5))
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax, labelsize=14)
gvutil.add_lat_lon_ticklabels(ax)
gvutil.set_titles_and_labels(ax,
maintitle= 'SST Trend (1982-2020)',
maintitlefontsize= 18,
ylabel='Latitude',
xlabel='Longitude',
labelfontsize=16)
# Remove the degree symbol from tick labels
ax.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))
ax.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))
# Set contour levels
#levels = np.arange(0.04, 0.25, 0.0)
levels = | np.linspace(0.04,0.25,11) | numpy.linspace |
import sys
import typing
import numba as nb
import numpy as np
@nb.njit
def sa_doubling(
a: np.array,
) -> np.array:
n = a.size
a = np.searchsorted(
np.unique(a),
a,
)
cnt = np.zeros(n + 1, dtype=np.int32)
def count_sort(a):
for x in a: cnt[x + 1] += 1
for i in range(n): cnt[i + 1] += cnt[i]
idx = np.empty(n, dtype=np.int32)
for i in range(n):
x = a[i]
idx[cnt[x]] = i
cnt[x] += 1
cnt[:] = 0
return idx
k = 1
rank = a
while 1:
b = np.zeros(n, dtype=np.int64)
for i in range(n - k):
b[i] = rank[i + k] + 1
ord_b = count_sort(b)
a = rank[ord_b]
ord_a = count_sort(a)
sa = ord_b[ord_a]
c = a[ord_a] << 32 | b[sa]
rank[sa[0]] = 0
for i in range(n - 1):
rank[sa[i + 1]] = rank[sa[i]] + (c[i + 1] > c[i])
k *= 2
if k >= n: break
b[:] = 0
return sa
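# Hedged usage sketch (assumes the nb.njit signature above accepts a plain int64
# array; 'mississippi' is illustrative): build the suffix array of a short string.
def _sa_doubling_example():
    text = np.frombuffer(b'mississippi', dtype=np.uint8).astype(np.int64)
    return sa_doubling(text)  # start indices of the suffixes in sorted order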
@nb.njit
def kasai(
a: np.array,
sa: np.array,
) -> np.array:
n = a.size
if n == 0:
return | np.full(n, -1, dtype=np.int32) | numpy.full |
# This is a small chunk of code from the skimage package. It is reproduced
# here because all we need is a couple color conversion routines, and adding
# all of skimage as dependecy is really heavy.
# Copyright (C) 2019, the scikit-image team
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name of skimage nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# skimage/_shared/version_requirements.py:_check_version
# Copyright (c) 2013 The IPython Development Team
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# skimage/_shared/version_requirements.py:is_installed:
# Original Copyright (C) 2009-2011 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# skimage/external/tifffile:
# Copyright (c) 2008-2014, <NAME>
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from scipy import linalg
from warnings import warn
def rgb2xyz(rgb):
"""RGB to XYZ color space conversion.
Parameters
----------
rgb : (..., 3) array_like
The image in RGB format. Final dimension denotes channels.
Returns
-------
out : (..., 3) ndarray
The image in XYZ format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3).
Notes
-----
The CIE XYZ color space is derived from the CIE RGB color space. Note
however that this function converts from sRGB.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
"""
# Follow the algorithm from http://www.easyrgb.com/index.php
# except we don't multiply/divide by 100 in the conversion
arr = _prepare_colorarray(rgb).copy()
mask = arr > 0.04045
arr[mask] = np.power((arr[mask] + 0.055) / 1.055, 2.4)
arr[~mask] /= 12.92
return arr @ xyz_from_rgb.T.astype(arr.dtype)
def lab2xyz(lab, illuminant="D65", observer="2"):
"""CIE-LAB to XYZcolor space conversion.
Parameters
----------
lab : array_like
The image in lab format, in a 3-D array of shape ``(.., .., 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in XYZ format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `lab` is not a 3-D array of shape ``(.., .., 3)``.
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
UserWarning
If any of the pixels are invalid (Z < 0).
Notes
-----
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values x_ref
= 95.047, y_ref = 100., z_ref = 108.883. See function 'get_xyz_coords' for
a list of supported illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
.. [2] https://en.wikipedia.org/wiki/Lab_color_space
"""
arr = _prepare_colorarray(lab).copy()
L, a, b = arr[:, :, 0], arr[:, :, 1], arr[:, :, 2]
y = (L + 16.) / 116.
x = (a / 500.) + y
z = y - (b / 200.)
if np.any(z < 0):
invalid = np.nonzero(z < 0)
warn('Color data out of range: Z < 0 in %s pixels' % invalid[0].size,
stacklevel=2)
z[invalid] = 0
out = np.dstack([x, y, z])
mask = out > 0.2068966
out[mask] = np.power(out[mask], 3.)
out[~mask] = (out[~mask] - 16.0 / 116.) / 7.787
# rescale to the reference white (illuminant)
xyz_ref_white = get_xyz_coords(illuminant, observer)
out *= xyz_ref_white
return out
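# Hedged usage sketch (pixel values are illustrative; relies on the img_as_float
# helper defined later in this module): lab2xyz expects an ``(.., .., 3)`` array,
# so even a single CIE-LAB triple needs a (1, 1, 3) shape.
def _lab2xyz_example():
    lab = np.array([[[50.0, 10.0, -20.0]]])
    return lab2xyz(lab)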
def xyz2lab(xyz, illuminant="D65", observer="2"):
"""XYZ to CIE-LAB color space conversion.
Parameters
----------
xyz : array_like
The image in XYZ format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in CIE-LAB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `xyz` is not a 3-D array of shape ``(.., ..,[ ..,] 3)``.
ValueError
If either the illuminant or the observer angle is unsupported or
unknown.
Notes
-----
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for
a list of supported illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
.. [2] https://en.wikipedia.org/wiki/Lab_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2lab
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
>>> img_lab = xyz2lab(img_xyz)
"""
arr = _prepare_colorarray(xyz)
xyz_ref_white = get_xyz_coords(illuminant, observer)
# scale by CIE XYZ tristimulus values of the reference white point
arr = arr / xyz_ref_white
# Nonlinear distortion and linear transformation
mask = arr > 0.008856
arr[mask] = np.cbrt(arr[mask])
arr[~mask] = 7.787 * arr[~mask] + 16. / 116.
x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]
# Vector scaling
L = (116. * y) - 16.
a = 500.0 * (x - y)
b = 200.0 * (y - z)
return np.concatenate([x[..., np.newaxis] for x in [L, a, b]], axis=-1)
def lab2rgb(lab, illuminant="D65", observer="2"):
"""Lab to RGB color space conversion.
Parameters
----------
lab : array_like
The image in Lab format, in a 3-D array of shape ``(.., .., 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `lab` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
Notes
-----
This function uses lab2xyz and xyz2rgb.
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for
a list of supported illuminants.
"""
return xyz2rgb(lab2xyz(lab, illuminant, observer))
def rgb2lab(rgb, illuminant="D65", observer="2"):
"""RGB to lab color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in Lab format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
Notes
-----
This function uses rgb2xyz and xyz2lab.
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for
a list of supported illuminants.
"""
return xyz2lab(rgb2xyz(rgb), illuminant, observer)
def lch2lab(lch):
"""CIE-LCH to CIE-LAB color space conversion.
LCH is the cylindrical representation of the LAB (Cartesian) colorspace
Parameters
----------
lch : array_like
The N-D image in CIE-LCH format. The last (``N+1``-th) dimension must
have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``
color channels. Subsequent elements are copied.
Returns
-------
out : ndarray
The image in LAB format, with same shape as input `lch`.
Raises
------
ValueError
If `lch` does not have at least 3 color channels (i.e. l, c, h).
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2lab, lch2lab
>>> img = data.astronaut()
>>> img_lab = rgb2lab(img)
>>> img_lch = lab2lch(img_lab)
>>> img_lab2 = lch2lab(img_lch)
"""
lch = _prepare_lab_array(lch)
c, h = lch[..., 1], lch[..., 2]
lch[..., 1], lch[..., 2] = c * np.cos(h), c * np.sin(h)
return lch
def _prepare_lab_array(arr):
"""Ensure input for lab2lch, lch2lab are well-posed.
Arrays must be in floating point and have at least 3 elements in
last dimension. Return a new array.
"""
arr = np.asarray(arr)
shape = arr.shape
if shape[-1] < 3:
raise ValueError('Input array has less than 3 color channels')
return img_as_float(arr, force_copy=True)
def get_xyz_coords(illuminant, observer):
"""Get the XYZ coordinates of the given illuminant and observer [1]_.
Parameters
----------
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
(x, y, z) : tuple
A tuple with 3 elements containing the XYZ coordinates of the given
illuminant.
Raises
------
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
"""
illuminant = illuminant.upper()
try:
return illuminants[illuminant][observer]
except KeyError:
raise ValueError("Unknown illuminant/observer combination\
(\'{0}\', \'{1}\')".format(illuminant, observer))
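# Hedged usage sketch: look up the D65 / 2-degree reference white that the
# conversions above default to.
def _reference_white_example():
    return get_xyz_coords("D65", "2")  # (0.95047, 1.0, 1.08883)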
def _prepare_colorarray(arr):
"""Check the shape of the array and convert it to
floating point representation.
"""
arr = np.asanyarray(arr)
if arr.ndim not in [3, 4] or arr.shape[-1] != 3:
msg = ("the input array must be have a shape == (.., ..,[ ..,] 3)), " +
"got (" + (", ".join(map(str, arr.shape))) + ")")
raise ValueError(msg)
return img_as_float(arr)
def xyz2rgb(xyz):
"""XYZ to RGB color space conversion.
Parameters
----------
xyz : array_like
The image in XYZ format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `xyz` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
The CIE XYZ color space is derived from the CIE RGB color space. Note
however that this function converts to sRGB.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2rgb
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
>>> img_rgb = xyz2rgb(img_xyz)
"""
# Follow the algorithm from http://www.easyrgb.com/index.php
# except we don't multiply/divide by 100 in the conversion
arr = _convert(rgb_from_xyz, xyz)
mask = arr > 0.0031308
arr[mask] = 1.055 * np.power(arr[mask], 1 / 2.4) - 0.055
arr[~mask] *= 12.92
np.clip(arr, 0, 1, out=arr)
return arr
def _convert(matrix, arr):
"""Do the color space conversion.
Parameters
----------
matrix : array_like
The 3x3 matrix to use.
arr : array_like
The input array.
Returns
-------
out : ndarray, dtype=float
The converted array.
"""
arr = _prepare_colorarray(arr)
return arr @ matrix.T.copy()
# ---------------------------------------------------------------
# Primaries for the coordinate systems
# ---------------------------------------------------------------
cie_primaries = np.array([700, 546.1, 435.8])
sb_primaries = np.array([1. / 155, 1. / 190, 1. / 225]) * 1e5
# ---------------------------------------------------------------
# Matrices that define conversion between different color spaces
# ---------------------------------------------------------------
# From sRGB specification
xyz_from_rgb = np.array([[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227]])
rgb_from_xyz = linalg.inv(xyz_from_rgb)
# From https://en.wikipedia.org/wiki/CIE_1931_color_space
# Note: Travis's code did not have the divide by 0.17697
xyz_from_rgbcie = np.array([[0.49, 0.31, 0.20],
[0.17697, 0.81240, 0.01063],
[0.00, 0.01, 0.99]]) / 0.17697
rgbcie_from_xyz = linalg.inv(xyz_from_rgbcie)
# construct matrices to and from rgb:
rgbcie_from_rgb = rgbcie_from_xyz @ xyz_from_rgb
rgb_from_rgbcie = rgb_from_xyz @ xyz_from_rgbcie
gray_from_rgb = np.array([[0.2125, 0.7154, 0.0721],
[0, 0, 0],
[0, 0, 0]])
yuv_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],
[-0.14714119, -0.28886916, 0.43601035 ],
[ 0.61497538, -0.51496512, -0.10001026 ]])
rgb_from_yuv = linalg.inv(yuv_from_rgb)
yiq_from_rgb = np.array([[0.299 , 0.587 , 0.114 ],
[0.59590059, -0.27455667, -0.32134392],
[0.21153661, -0.52273617, 0.31119955]])
rgb_from_yiq = linalg.inv(yiq_from_rgb)
ypbpr_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],
[-0.168736,-0.331264, 0.5 ],
[ 0.5 ,-0.418688,-0.081312]])
rgb_from_ypbpr = linalg.inv(ypbpr_from_rgb)
ycbcr_from_rgb = np.array([[ 65.481, 128.553, 24.966],
[ -37.797, -74.203, 112.0 ],
[ 112.0 , -93.786, -18.214]])
rgb_from_ycbcr = linalg.inv(ycbcr_from_rgb)
ydbdr_from_rgb = np.array([[ 0.299, 0.587, 0.114],
[ -0.45 , -0.883, 1.333],
[ -1.333, 1.116, 0.217]])
rgb_from_ydbdr = linalg.inv(ydbdr_from_rgb)
# CIE LAB constants for Observer=2A, Illuminant=D65
# NOTE: this is actually the XYZ values for the illuminant above.
lab_ref_white = np.array([0.95047, 1., 1.08883])
# XYZ coordinates of the illuminants, scaled to [0, 1]. For each illuminant I
# we have:
#
# illuminant[I][0] corresponds to the XYZ coordinates for the 2 degree
# field of view.
#
# illuminant[I][1] corresponds to the XYZ coordinates for the 10 degree
# field of view.
#
# The XYZ coordinates are calculated from [1], using the formula:
#
# X = x * ( Y / y )
# Y = Y
# Z = ( 1 - x - y ) * ( Y / y )
#
# where Y = 1. The only exception is the illuminant "D65" with aperture angle
# 2, whose coordinates are copied from 'lab_ref_white' for
# backward-compatibility reasons.
#
# References
# ----------
# .. [1] https://en.wikipedia.org/wiki/Standard_illuminant
illuminants = \
{"A": {'2': (1.098466069456375, 1, 0.3558228003436005),
'10': (1.111420406956693, 1, 0.3519978321919493)},
"D50": {'2': (0.9642119944211994, 1, 0.8251882845188288),
'10': (0.9672062750333777, 1, 0.8142801513128616)},
"D55": {'2': (0.956797052643698, 1, 0.9214805860173273),
'10': (0.9579665682254781, 1, 0.9092525159847462)},
"D65": {'2': (0.95047, 1., 1.08883), # This was: `lab_ref_white`
'10': (0.94809667673716, 1, 1.0730513595166162)},
"D75": {'2': (0.9497220898840717, 1, 1.226393520724154),
'10': (0.9441713925645873, 1, 1.2064272211720228)},
"E": {'2': (1.0, 1.0, 1.0),
'10': (1.0, 1.0, 1.0)}}
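# Hedged helper sketch (not part of the original module): the chromaticity formula
# quoted in the comment above, with Y normalised to 1. The (x, y) values would come
# from the standard-illuminant tables in reference [1].
def _xyz_from_chromaticity(x, y, Y=1.0):
    return (x * (Y / y), Y, (1.0 - x - y) * (Y / y))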
__all__ = ['img_as_float32', 'img_as_float64', 'img_as_float',
#'img_as_int', 'img_as_uint', 'img_as_ubyte',
#'img_as_bool',
'dtype_limits']
# For integers Numpy uses `_integer_types` basis internally, and builds a leaky
# `np.XintYY` abstraction on top of it. This leads to situations when, for
# example, there are two np.Xint64 dtypes with the same attributes but
# different object references. In order to avoid any potential issues,
# we use the basis dtypes here. For more information, see:
# - https://github.com/scikit-image/scikit-image/issues/3043
# For convenience, for these dtypes we indicate also the possible bit depths
# (some of them are platform specific). For the details, see:
# http://www.unix.org/whitepapers/64bit.html
_integer_types = (np.byte, np.ubyte, # 8 bits
np.short, np.ushort, # 16 bits
np.intc, np.uintc, # 16 or 32 or 64 bits
np.int_, np.uint, # 32 or 64 bits
np.longlong, np.ulonglong) # 64 bits
_integer_ranges = {t: (np.iinfo(t).min, | np.iinfo(t) | numpy.iinfo |
import numpy as np
import matplotlib
matplotlib.use('AGG')
import matplotlib.colors as colors
matplotlib.rcParams['font.size'] = 12
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['savefig.bbox'] = 'tight'
matplotlib.rcParams['savefig.pad_inches'] = 0
smallfont = {'family': 'serif', 'size': 12}
font = {'family': 'serif', 'size': 18}
bigfont = {'family': 'serif', 'size': 40}
giantfont = {'family': 'serif', 'size': 80}
ggiantfont = {'family': 'serif', 'size': 120}
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import sys
from sklearn import linear_model
def mantexp(num):
# Generate the mantissa and exponent
if num == 0:
return 0,0
exponent = int(np.log10(np.abs(num)))
mantissa = num/(10**exponent)
if np.abs(mantissa) < 1:
mantissa *= 10
exponent -= 1
return mantissa,exponent
def generate_sci_fmt(xmin,xmax,numdiv=100):
# Print to two sig figs
eps = (xmax-xmin)/numdiv
#print("eps = {}".format(eps))
general_sci_fmt = lambda num,pos: sci_not_precision(num,eps)
return general_sci_fmt
def sci_not_precision(num,eps):
# Specify a number to an accuracy of epsilon/10
#print("num = {}".format(num))
if np.abs(num) < eps*1e-3:
#print("num = {}; returning 0".format(num))
return "0"
Mn,En = mantexp(num)
Me,Ee = mantexp(eps)
# Need Ee+1 places past the decimal point
#digs = np.abs(Ee) # Wrong for large eps
# Need enough digits to distinguish num from num+eps
digs = max(0,En-Ee)
#print("digs = {}".format(digs))
num_to_prec = eval(("{:."+str(digs)+"e}").format(num))
#print("num_to_prec = {}".format(num_to_prec))
# Now format it accordingly
Mn,En = mantexp(num_to_prec)
if np.abs(En) > 2:
#sci = ("{:."+str(digs)+"f}\\times 10^{}").format(Mn,En)
sci = ("{:."+str(digs)+"f}").format(Mn)
#sci = "%s\\times 10^{%d}"%(sci,En)
sci = "%se%d"%(sci,En)
else:
#sci = ("{:."+str(digs)+"f}").format(num_to_prec)
sci = ("{:."+str(digs)+"f}").format(num_to_prec)
return sci #num_to_prec
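# Hedged usage sketch (the axis object and limits are whatever the caller has on
# hand): generate_sci_fmt pairs naturally with ticker.FuncFormatter, as in the
# plotting helpers further below.
def _apply_sci_fmt_example(ax, vmin, vmax):
    fmt_fun = generate_sci_fmt(vmin, vmax)
    ax.yaxis.set_major_formatter(ticker.FuncFormatter(fmt_fun))
    return ax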
def fmt(num,pos):
return '{:.1f}'.format(num)
def fmt2(num,pos):
return '{:.2f}'.format(num)
def fmt3(num,pos):
return '{:.3f}'.format(num)
def sci_fmt(num,lim):
return '{:.1e}'.format(num)
def sci_fmt_short(num,lim):
return '{:.0e}'.format(num)
def sci_fmt_latex0(num):
# Convert a number to scientific notation
exponent = int(np.log10(np.abs(num)))
mantissa = num/(10**exponent)
if np.abs(mantissa) < 1:
mantissa += np.sign(mantissa)
exponent -= 1
if exponent != 0:
sci = "%.0f\\times 10^{%d}" % (mantissa,exponent)
else:
sci = r"%.0f" % mantissa
return sci
def sci_fmt_latex1(num):
# Convert a number to scientific notation
exponent = int(np.log10(np.abs(num)))
mantissa = num/(10**exponent)
if np.abs(mantissa) < 1:
mantissa += np.sign(mantissa)
exponent -= 1
if exponent != 0:
sci = "%.1f\\times 10^{%d}" % (mantissa,exponent)
else:
sci = r"%.1f" % mantissa
return sci
def sci_fmt_latex(num):
# Convert a number to scientific notation
exponent = int(np.log10(np.abs(num)))
mantissa = num/(10**exponent)
if np.abs(mantissa) < 1:
mantissa += np.sign(mantissa)
exponent -= 1
if exponent != 0:
sci = "%.2f\\times 10^{%d}" % (mantissa,exponent)
else:
sci = r"$%.2f$" % mantissa
return sci
def mean_uncertainty(X,num_blocks=10):
# Given a list of numbers X, return the mean and the uncertainty in the mean
N = len(X)
block_size = int(N/num_blocks)
N = block_size*num_blocks # might be less than len(X), but not by more than block_size-1
block_means = np.zeros(num_blocks)
idx = np.arange(N).reshape((num_blocks,block_size))
for i in range(num_blocks):
block_means[i] = np.mean(X[idx[i]])
unc = np.std(block_means)
print("mean(X) = {}. min(block_means) = {}. max(block_means) = {}, unc = {}".format(np.mean(X),np.min(block_means),np.max(block_means),unc))
return unc
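# Hedged usage sketch (synthetic series, purely illustrative): block averaging gives
# a rough uncertainty on the mean of a correlated time series; num_blocks trades
# bias against noise in that estimate.
def _mean_uncertainty_example():
    rng = np.random.default_rng(1)
    series = np.cumsum(rng.normal(size=10000)) * 1e-3
    return np.mean(series), mean_uncertainty(series, num_blocks=10)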
def both_grids(bounds,shp):
# This time shp is the number of cells
Nc = np.prod(shp-1) # Number of centers
Ne = np.prod(shp) # Number of edges
center_grid = np.array(np.unravel_index(np.arange(Nc),shp-1)).T
edge_grid = np.array(np.unravel_index(np.arange(Ne),shp)).T
dx = (bounds[:,1] - bounds[:,0])/(shp - 1)
center_grid = bounds[:,0] + dx * (center_grid + 0.5)
edge_grid = bounds[:,0] + dx * edge_grid
return center_grid,edge_grid,dx
def project_field(field,weight,theta_x,shp=None,avg_flag=True,bounds=None):
if np.min(weight) < 0:
sys.exit("Negative weights")
# Given a vector-valued observable function evaluation theta_x, find the mean
# and standard deviation of the field across remaining dimensions
# Also return some integrated version of the standard deviation
thdim = theta_x.shape[1]
if shp is None: shp = 20*np.ones(thdim,dtype=int) # number of INTERIOR
if bounds is None:
bounds = np.array([np.min(theta_x,0)-1e-10,np.max(theta_x,0)+1e-10]).T
cgrid,egrid,dth = both_grids(bounds, shp+1)
thaxes = [np.linspace(bounds[i,0]+dth[i]/2,bounds[i,1]-dth[i]/2,shp[i]) for i in range(thdim)]
data_bins = ((theta_x - bounds[:,0])/dth).astype(int)
for d in range(len(shp)):
data_bins[:,d] = np.maximum(data_bins[:,d],0)
data_bins[:,d] = np.minimum(data_bins[:,d],shp[d]-1)
data_bins_flat = np.ravel_multi_index(data_bins.T,shp) # maps data points to bin
Ncell = np.prod(shp)
filler = np.nan if avg_flag else 0.0
field_mean = filler*np.ones(Ncell)
field_std = filler*np.ones(Ncell)
for i in range(Ncell):
idx = np.where(data_bins_flat == i)[0]
if len(idx) > 0 and not np.all(np.isnan(field[idx])):
weightsum = np.sum(weight[idx]*(1-np.isnan(field[idx])))
field_mean[i] = np.nansum(field[idx]*weight[idx])
#if avg_flag and (weightsum == 0):
# sys.exit("Doh! supposed to average, but weights are zero!")
if avg_flag and (weightsum != 0):
field_mean[i] *= 1/weightsum
field_std[i] = np.sqrt(np.nansum((field[idx]-field_mean[i])**2*weight[idx]))
field_std[i] *= 1/np.sqrt(weightsum)
field_range = np.nanmax(field[idx])-np.nanmin(field[idx])
#if (len(idx) > 1) and (field_mean[i] < np.min(field[idx])-0.05*field_range or field_mean[i] > np.max(field[idx])+0.05*field_range):
if (field_mean[i] < np.min(field[idx])) and np.abs((field_mean[i] - np.min(field[idx]))/np.min(field[idx])) > 0.05:
sys.exit("Doh! Too low! field_mean[i]={}, min(field[idx])={}".format(field_mean[i],np.min(field[idx])))
if (field_mean[i] > np.max(field[idx])) and np.abs((field_mean[i] - np.max(field[idx]))/np.max(field[idx])) > 0.05:
sys.exit("Doh! Too high! field_mean[i]={}, max(field[idx])={}".format(field_mean[i],np.max(field[idx])))
#sys.exit("Doh! Average is outside the bounds! len(idx)={}\n field_mean[i] = {}\n field[idx] in ({},{})\n weights in ({},{})\n".format(len(idx),field_mean[i],np.min(field[idx]),np.max(field[idx]),np.min(weight[idx]),np.max(weight[idx])))
field_std_L2 = np.sqrt(np.nansum(field_std**2)/Ncell) #*np.prod(dth))
field_std_Linf = np.nanmax(field_std)*np.prod(dth)
return shp,dth,thaxes,cgrid,field_mean,field_std,field_std_L2,field_std_Linf,bounds
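# Hedged usage sketch (synthetic data; the 10x10 grid is an assumption): bin a noisy
# scalar field over a 2-D CV space and recover its conditional mean per cell.
def _project_field_example():
    rng = np.random.default_rng(2)
    theta_x = rng.uniform(size=(5000, 2))
    field = np.sin(2 * np.pi * theta_x[:, 0]) + 0.1 * rng.normal(size=5000)
    weight = np.ones(5000)
    out = project_field(field, weight, theta_x, shp=np.array([10, 10]))
    shp_out, field_mean = out[0], out[4]
    return field_mean.reshape(shp_out)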
def plot_field_1d(theta,u,weight,shp=[20,],uname="",thetaname="",avg_flag=True,std_flag=False,fig=None,ax=None,color='black',label="",linestyle='-',linewidth=1,orientation='horizontal',units=1.0,unit_symbol="",eq_ax=False,density_flag=False):
shp = np.array(shp)
# Plot a 1d scatterplot of a field average across remaining dimensions
print("avg_flag = {}".format(avg_flag))
shp,dth,thaxes,cgrid,u_mean,u_std,u_std_L2,u_std_Linf,_ = project_field(u,weight,theta.reshape(-1,1),shp,avg_flag=avg_flag)
if density_flag:
u_mean *= dth[0]/units
u_std *= dth[0]/units
print("shp0 = {}, dth={}".format(shp,dth*units))
print("thaxes in ({},{})".format(thaxes[0][0]*units,thaxes[0][-1]*units))
print("u in ({},{}), u_mean in ({},{})".format(np.nanmin(u),np.nanmax(u),np.nanmin(u_mean),np.nanmax(u_mean)))
if (fig is None) or (ax is None):
fig,ax = plt.subplots(figsize=(6,6),constrained_layout=True)
if orientation=='horizontal':
handle, = ax.plot(units*thaxes[0],u_mean,marker='o',linestyle=linestyle,color=color,label=label,linewidth=linewidth)
if std_flag:
ax.plot(units*thaxes[0],u_mean-u_std,color=color,linestyle='--',linewidth=linewidth)
ax.plot(units*thaxes[0],u_mean+u_std,color=color,linestyle='--',linewidth=linewidth)
xlab = thetaname
if len(unit_symbol) > 0: xlab += " ({})".format(unit_symbol)
ax.set_xlabel(xlab,fontdict=font)
ax.set_ylabel(uname,fontdict=font)
ax.set_xlim([np.min(units*theta),np.max(units*theta)])
else:
handle, = ax.plot(u_mean,units*thaxes[0],marker='o',linestyle=linestyle,color=color,label=label,linewidth=linewidth)
if std_flag:
ax.plot(u_mean-u_std,units*thaxes[0],color=color,linestyle='--')
ax.plot(u_mean+u_std,units*thaxes[0],color=color,linestyle='--')
ylab = thetaname
if len(unit_symbol) > 0: ylab += " ({})".format(unit_symbol)
print("ylab = {}".format(ylab))
ax.set_ylabel(ylab,fontdict=font)
ax.set_xlabel(uname,fontdict=font)
ax.set_ylim([np.min(units*theta),np.max(units*theta)])
xlim,ylim = ax.get_xlim(),ax.get_ylim()
fmt_x = generate_sci_fmt(xlim[0],xlim[1])
fmt_y = generate_sci_fmt(ylim[0],ylim[1])
ax.xaxis.set_major_formatter(ticker.FuncFormatter(fmt_x))
ax.yaxis.set_major_formatter(ticker.FuncFormatter(fmt_y))
ax.tick_params(axis='x',labelsize=10)
ax.tick_params(axis='y',labelsize=10)
#ax.xaxis.set_major_locator(plt.MaxNLocator(nbins=3))
#ax.yaxis.set_major_locator(plt.MaxNLocator(nbins=5))
if eq_ax:
xylim = np.array([ax.get_xlim(),ax.get_ylim()])
xylim = np.array([np.min(xylim[:,0]),np.max(xylim[:,1])])
ax.set_xlim(xylim)
ax.set_ylim(xylim)
ax.plot(xylim,xylim,color='black',linestyle='--')
return fig,ax,handle
def plot_field_2d(field,weight,theta_x,shp=[20,20],cmap=plt.cm.coolwarm,fieldname="",fun0name="",fun1name="",avg_flag=True,std_flag=True,logscale=False,ss=None,units=np.ones(2),unit_symbols=["",""],cbar_orientation='horizontal',cbar_location='top',fig=None,ax=None,vmin=None,vmax=None,cbar_pad=0.2,fmt_x=None,fmt_y=None):
# The function inside TPT should just extend this one
shp = np.array(shp)
shp,dth,thaxes,cgrid,field_mean,field_std,field_std_L2,field_std_Linf,_ = project_field(field,weight,theta_x,shp,avg_flag=avg_flag)
if std_flag:
if fig is None or ax is None:
fig,ax = plt.subplots(ncols=2,figsize=(12,6))
ax0,ax1 = ax[0],ax[1]
else:
if fig is None or ax is None:
fig,ax = plt.subplots(figsize=(6,6))
ax0 = ax
th01,th10 = np.meshgrid(units[0]*thaxes[0],units[1]*thaxes[1],indexing='ij')
if logscale:
realidx = np.where(np.isnan(field_mean)==0)[0]
if len(realidx) > 0:
posidx = realidx[np.where(field_mean[realidx] > 0)[0]]
#field_mean[posidx] = np.log10(field_mean[posidx])
field_mean[np.setdiff1d(np.arange(np.prod(shp)),posidx)] = np.nan
locator = ticker.LogLocator(numticks=10) if logscale else ticker.MaxNLocator()
im = ax0.contourf(th01,th10,field_mean.reshape(shp),cmap=cmap,locator=locator,zorder=1,vmin=vmin,vmax=vmax)
ax0.set_xlim([np.min(units[0]*thaxes[0]),np.max(units[0]*thaxes[0])])
ax0.set_ylim([np.min(units[1]*thaxes[1]),np.max(units[1]*thaxes[1]) + 0.15*units[1]*np.ptp(thaxes[1])])
#print("eps = {} - {}".format(np.nanmax(field_mean),np.nanmin(field_mean)))
cbar_fmt = generate_sci_fmt(np.nanmin(field_mean),np.nanmax(field_mean),20)
# -------------------
# New colorbar code
ax0_left,ax0_bottom,ax0_width,ax0_height = ax0.get_position().bounds
if cbar_orientation == 'vertical':
sys.exit("Not doing vertical colorbars right now")
elif cbar_orientation == 'horizontal':
if cbar_location == 'bottom':
cbaxes = fig.add_axes([0.2,0.00,0.8,0.01])
elif cbar_location == 'top':
cbaxes = fig.add_axes([ax0_left+0.1*ax0_width,ax0_bottom+0.97*ax0_height,0.8*ax0_width,0.03*ax0_height])
if not logscale:
cbar = plt.colorbar(im, ax=ax0, cax=cbaxes, orientation=cbar_orientation, format=ticker.FuncFormatter(cbar_fmt), ticks=np.linspace(np.nanmin(field_mean),np.nanmax(field_mean),4))
else:
cbar = plt.colorbar(im, ax=ax0, cax=cbaxes, ticks=10.0**(np.linspace(np.log10(np.nanmin(field_mean)),np.log10(np.nanmax(field_mean)),4).astype(int)), orientation='horizontal')
cbar.ax.tick_params(labelsize=15)
# -------------------
# Old colorbar code
#if cbar_orientation is not None:
# if not logscale:
# cbar = fig.colorbar(im,ax=ax0,format=ticker.FuncFormatter(cbar_fmt),orientation=cbar_orientation,pad=cbar_pad,ticks=np.linspace(np.nanmin(field_mean),np.nanmax(field_mean),4))
# else:
# cbar = fig.colorbar(im,ax=ax0,orientation=cbar_orientation,pad=cbar_pad)
# cbar.ax.tick_params(labelsize=15)
# --------------------
# Super-old colorbar code
#if logscale:
# logmin = np.nanmin(np.log10(field_mean))
# logmax = np.nanmax(np.log10(field_mean))
# print("logmin = {}, logmax = {}".format(logmin,logmax))
# log_tick_arr = np.linspace(np.nanmin(np.log10(field_mean)),np.nanmax(np.log10(field_mean)),4)
# print("log_tick_arr = {}".format(log_tick_arr))
# locator = ticker.FixedLocator(10**log_tick_arr)
#else:
# locator = ticker.MaxNLocator(nbins=3)
# cbar.locator = locator #ticker.MaxNLocator(nbins=3)
# cbar.update_ticks()
# --------------------
ax0.tick_params(axis='x',labelsize=14)
ax0.tick_params(axis='y',labelsize=14)
xlim,ylim = ax0.get_xlim(),ax0.get_ylim()
fmt_x = generate_sci_fmt(xlim[0],xlim[1])
fmt_y = generate_sci_fmt(ylim[0],ylim[1])
#if fmt_x is None:
# fmt_x = fmt if xlim[1]-xlim[0]<1e3 else sci_fmt
#if fmt_y is None:
# fmt_y = fmt if xlim[1]-xlim[0]<1e3 else sci_fmt
ax0.xaxis.set_major_formatter(ticker.FuncFormatter(fmt_x))
ax0.yaxis.set_major_formatter(ticker.FuncFormatter(fmt_y))
ax0.xaxis.set_major_locator(ticker.MaxNLocator(nbins=4))
if std_flag:
im = ax1.contourf(th01,th10,field_std.reshape(shp),cmap=plt.cm.magma)
ax1.tick_params(axis='x',labelsize=10)
ax1.tick_params(axis='y',labelsize=10)
ax0.set_title("{}".format(fieldname),fontdict=font,y=1.0) #,loc='left')
xlab = fun0name
if len(unit_symbols[0]) > 0: xlab += " [{}]".format(unit_symbols[0])
ylab = fun1name
if len(unit_symbols[1]) > 0: ylab += " [{}]".format(unit_symbols[1])
ax0.set_xlabel("{}".format(xlab),fontdict=font)
ax0.set_ylabel("{}".format(ylab),fontdict=font)
if std_flag:
cbar = fig.colorbar(im,ax=ax[1],format=ticker.FuncFormatter(fmt),orientation=cbar_orientation,pad=0.2,ticks=np.linspace(np.nanmin(field_std),np.nanmax(field_std),4))
cbar.ax.tick_params(labelsize=10)
ax1.set_title(r"Std; $L^2=%.2e$"%(field_std_L2),fontdict=font)
ax1.set_xlabel("{}".format(xlab),fontdict=font)
ax1.set_ylabel("{}".format(ylab),fontdict=font)
return fig,ax
def reweight_data(x,theta_fun,algo_params,theta_pdf):
# theta_fun maps data points into a CV space; theta_pdf is a density function on that CV space (need not be normalized)
# Given a reference dataset meant to be pi-distributed, resample
# ref_data could be transformed
Nx = len(x)
theta_x = theta_fun(x,algo_params)
theta_weights = theta_pdf(theta_x)
shp,dth,thaxes,cgrid,field_mean,field_std,field_std_L2,field_std_Linf,bounds = project_field(np.ones(Nx),np.ones(Nx),theta_x,avg_flag=False)
lower_bounds = np.array([th[0] for th in thaxes])
data_bins = ((theta_x - lower_bounds)/dth).astype(int)
data_bins_flat = np.ravel_multi_index(data_bins.T,shp).T
empirical_weights = field_mean[data_bins_flat]
print("empirical weights: min={}, max={}, mean={}, std={}".format(np.min(empirical_weights),np.max(empirical_weights),np.mean(empirical_weights),np.std(empirical_weights)))
sample_weights = theta_weights*(empirical_weights!=0) / (empirical_weights + 1*(empirical_weights==0))
#sample_weights = 1*(empirical_weights==0) / (empirical_weights + 1*(empirical_weights==0))
sample_weights *= 1.0/np.sum(sample_weights)
return sample_weights
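# Hedged usage sketch (identity CV map and flat target density are assumptions made
# for illustration): reweight a dataset so that it becomes uniform in the first
# coordinate of the CV space.
def _reweight_data_example(x, algo_params):
    theta_fun = lambda data, params: data[:, :1]     # first coordinate as the CV
    theta_pdf = lambda theta: np.ones(len(theta))    # flat (unnormalized) target
    return reweight_data(x, theta_fun, algo_params, theta_pdf)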
def compare_fields(theta0,theta1,u0,u1,weights0,weights1,shp=None,avg_flag=True,subset_flag=True):
# u_emp is some timeseries that is a function following the long trajectory. u0 is its computed (conditional) expectation
# theta_fun is some CV space that we will grid up and compare u0 to u1 averaged over each box
N0 = len(theta0)
N1 = len(theta1)
if subset_flag:
ss0 = np.random.choice(np.arange(N0),size=min(N0,10000),replace=True)
ss1 = np.random.choice(np.arange(N1),size=min(N1,10000),replace=True)
else:
ss0 = np.arange(N0)
ss1 = np.arange(N1)
if shp is None: shp = 10*np.ones(2,dtype=int)
shp = np.array(shp)
shp,dth,thaxes,cgrid,u0_grid,u0_std,u0_std_L2,u0_std_Linf,bounds = project_field(u0[ss0],weights0[ss0]/np.sum(weights0[ss0]),theta0[ss0],avg_flag=avg_flag,shp=shp)
_,_,_,_,u1_grid,_,_,_,_ = project_field(u1[ss1],weights1[ss1]/np.sum(weights1[ss1]),theta1[ss1],avg_flag=avg_flag,shp=shp,bounds=bounds)
return shp,dth,thaxes,cgrid,u0_grid,u1_grid
def compare_plot_fields_1d(theta0,theta1,u0,u1,weights0,weights1,theta_name="",u_names=["",""],theta_units=1.0,theta_unit_symbol="",avg_flag=True,logscale=False,shp=None):
N0 = len(theta0)
N1 = len(theta1)
shp,dth,thaxes,cgrid,u0_grid,u1_grid = compare_fields(theta0,theta1,u0,u1,weights0,weights1,shp=shp,avg_flag=avg_flag)
# Also get the weights for each bin
_,_,_,_,w0_grid,w1_grid = compare_fields(theta0,theta1,np.ones(N0),np.ones(N1),weights0,weights1,shp=shp,avg_flag=False)
# Compute some total error metric
fig,ax = plt.subplots(ncols=2,figsize=(12,6))
scatter_subset = np.where((u0_grid>0)*(u1_grid>0))[0] if logscale else np.arange(len(u0_grid))
total_error = np.sqrt(np.nansum((u0_grid-u1_grid)**2*w1_grid)/np.nansum(w1_grid))
h = ax[0].scatter(u0_grid[scatter_subset],u1_grid[scatter_subset],marker='o',color='black',s=50*w1_grid[scatter_subset]/np.max(w1_grid[scatter_subset]),label=r"Avg. Error = %3.3e"%(total_error))
ax[0].legend(handles=[h],prop={'size':12})
umin = min(np.nanmin(u0_grid[scatter_subset]), | np.nanmin(u1_grid[scatter_subset]) | numpy.nanmin |
"""Define the Problem class and a FakeComm class for non-MPI users."""
import sys
import pprint
import os
import logging
import weakref
import time
from collections import defaultdict, namedtuple, OrderedDict
from fnmatch import fnmatchcase
from itertools import product
from io import StringIO
import numpy as np
import scipy.sparse as sparse
from openmdao.core.component import Component
from openmdao.jacobians.dictionary_jacobian import _CheckingJacobian
from openmdao.core.driver import Driver, record_iteration
from openmdao.core.group import Group, System
from openmdao.core.total_jac import _TotalJacInfo
from openmdao.core.constants import _DEFAULT_OUT_STREAM, _UNDEFINED
from openmdao.approximation_schemes.complex_step import ComplexStep
from openmdao.approximation_schemes.finite_difference import FiniteDifference
from openmdao.solvers.solver import SolverInfo
from openmdao.error_checking.check_config import _default_checks, _all_checks, \
_all_non_redundant_checks
from openmdao.recorders.recording_iteration_stack import _RecIteration
from openmdao.recorders.recording_manager import RecordingManager, record_viewer_data, \
record_model_options
from openmdao.utils.record_util import create_local_meta
from openmdao.utils.general_utils import ContainsAll, pad_name, _is_slicer_op, LocalRangeIterable
from openmdao.utils.mpi import MPI, FakeComm, multi_proc_exception_check, check_mpi_env
from openmdao.utils.name_maps import name2abs_names
from openmdao.utils.options_dictionary import OptionsDictionary
from openmdao.utils.units import simplify_unit
from openmdao.core.constants import _SetupStatus
from openmdao.utils.name_maps import abs_key2rel_key
from openmdao.vectors.vector import _full_slice
from openmdao.vectors.default_vector import DefaultVector
from openmdao.utils.logger_utils import get_logger, TestLogger
import openmdao.utils.coloring as coloring_mod
from openmdao.utils.hooks import _setup_hooks
from openmdao.utils.indexer import indexer
from openmdao.utils.om_warnings import issue_warning, DerivativesWarning, warn_deprecation, \
OMInvalidCheckDerivativesOptionsWarning
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
from openmdao.utils.name_maps import rel_key2abs_key, rel_name2abs_name
_contains_all = ContainsAll()
CITATION = """@article{openmdao_2019,
Author={<NAME> and <NAME> and <NAME>.
Martins and <NAME> and <NAME>},
Title="{OpenMDAO: An Open-Source Framework for Multidisciplinary
Design, Analysis, and Optimization}",
Journal="{Structural and Multidisciplinary Optimization}",
Year={2019},
Publisher={Springer},
pdf={http://openmdao.org/pubs/openmdao_overview_2019.pdf},
note= {In Press}
}"""
class Problem(object):
"""
Top-level container for the systems and drivers.
Parameters
----------
model : <System> or None
The top-level <System>. If not specified, an empty <Group> will be created.
driver : <Driver> or None
The driver for the problem. If not specified, a simple "Run Once" driver will be used.
comm : MPI.Comm or <FakeComm> or None
The global communicator.
name : str
Problem name. Can be used to specify a Problem instance when multiple Problems
exist.
**options : named args
All remaining named args are converted to options.
Attributes
----------
model : <System>
Pointer to the top-level <System> object (root node in the tree).
comm : MPI.Comm or <FakeComm>
The global communicator.
driver : <Driver>
Slot for the driver. The default driver is `Driver`, which just runs
the model once.
_mode : 'fwd' or 'rev'
Derivatives calculation mode, 'fwd' for forward, and 'rev' for
reverse (adjoint).
_orig_mode : 'fwd', 'rev', or 'auto'
Derivatives calculation mode assigned by the user. If set to 'auto', _mode will be
automatically assigned to 'fwd' or 'rev' based on relative sizes of design variables vs.
responses.
_initial_condition_cache : dict
Any initial conditions that are set at the problem level via setitem are cached here
until they can be processed.
cite : str
Listing of relevant citations that should be referenced when
publishing work that uses this class.
options : <OptionsDictionary>
Dictionary with general options for the problem.
recording_options : <OptionsDictionary>
Dictionary with problem recording options.
_rec_mgr : <RecordingManager>
Object that manages all recorders added to this problem.
_check : bool
If True, call check_config at the end of final_setup.
_filtered_vars_to_record : dict
Dictionary of lists of design vars, constraints, etc. to record.
_logger : object or None
Object for logging config checks if _check is True.
_name : str
Problem name.
_system_options_recorded : bool
A flag to indicate whether the system options for all the systems have been recorded
_metadata : dict
Problem level metadata.
_run_counter : int
The number of times run_driver or run_model has been called.
_warned : bool
        Bool to check if the `value` deprecation warning has occurred yet.
"""
def __init__(self, model=None, driver=None, comm=None, name=None, **options):
"""
Initialize attributes.
"""
self.cite = CITATION
self._name = name
self._warned = False
if comm is None:
use_mpi = check_mpi_env()
if use_mpi is False:
comm = FakeComm()
else:
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
except ImportError:
comm = FakeComm()
if model is None:
self.model = Group()
elif isinstance(model, System):
self.model = model
else:
raise TypeError(self.msginfo +
": The value provided for 'model' is not a valid System.")
if driver is None:
self.driver = Driver()
elif isinstance(driver, Driver):
self.driver = driver
else:
raise TypeError(self.msginfo +
": The value provided for 'driver' is not a valid Driver.")
self.comm = comm
self._mode = None # mode is assigned in setup()
self._initial_condition_cache = {}
self._metadata = None
self._run_counter = -1
self._system_options_recorded = False
self._rec_mgr = RecordingManager()
# General options
self.options = OptionsDictionary(parent_name=type(self).__name__)
self.options.declare('coloring_dir', types=str,
default=os.path.join(os.getcwd(), 'coloring_files'),
desc='Directory containing coloring files (if any) for this Problem.')
self.options.update(options)
# Case recording options
self.recording_options = OptionsDictionary(parent_name=type(self).__name__)
self.recording_options.declare('record_desvars', types=bool, default=True,
desc='Set to True to record design variables at the '
'problem level')
self.recording_options.declare('record_objectives', types=bool, default=True,
desc='Set to True to record objectives at the problem level')
self.recording_options.declare('record_constraints', types=bool, default=True,
desc='Set to True to record constraints at the '
'problem level')
self.recording_options.declare('record_responses', types=bool, default=False,
desc='Set True to record constraints and objectives at the '
'problem level.')
self.recording_options.declare('record_inputs', types=bool, default=False,
desc='Set True to record inputs at the '
'problem level.')
self.recording_options.declare('record_outputs', types=bool, default=True,
desc='Set True to record outputs at the '
'problem level.')
self.recording_options.declare('record_residuals', types=bool, default=False,
desc='Set True to record residuals at the '
'problem level.')
self.recording_options.declare('record_derivatives', types=bool, default=False,
desc='Set to True to record derivatives for the problem '
'level')
self.recording_options.declare('record_abs_error', types=bool, default=True,
desc='Set to True to record absolute error of '
'model nonlinear solver')
self.recording_options.declare('record_rel_error', types=bool, default=True,
desc='Set to True to record relative error of model \
nonlinear solver')
self.recording_options.declare('includes', types=list, default=['*'],
desc='Patterns for variables to include in recording. \
Uses fnmatch wildcards')
self.recording_options.declare('excludes', types=list, default=[],
desc='Patterns for vars to exclude in recording '
'(processed post-includes). Uses fnmatch wildcards')
_setup_hooks(self)
def _get_var_abs_name(self, name):
if name in self.model._var_allprocs_abs2meta:
return name
elif name in self.model._var_allprocs_prom2abs_list['output']:
return self.model._var_allprocs_prom2abs_list['output'][name][0]
elif name in self.model._var_allprocs_prom2abs_list['input']:
abs_names = self.model._var_allprocs_prom2abs_list['input'][name]
if len(abs_names) == 1:
return abs_names[0]
else:
raise KeyError("{}: Using promoted name `{}' is ambiguous and matches unconnected "
"inputs %s. Use absolute name to disambiguate.".format(self.msginfo,
name,
abs_names))
raise KeyError('{}: Variable "{}" not found.'.format(self.msginfo, name))
@property
def msginfo(self):
"""
Return info to prepend to messages.
Returns
-------
str
Info to prepend to messages.
"""
if self._name is None:
return type(self).__name__
return '{} {}'.format(type(self).__name__, self._name)
def _get_inst_id(self):
return self._name
def is_local(self, name):
"""
Return True if the named variable or system is local to the current process.
Parameters
----------
name : str
Name of a variable or system.
Returns
-------
bool
True if the named system or variable is local to this process.
"""
if self._metadata is None:
raise RuntimeError("{}: is_local('{}') was called before setup() "
"completed.".format(self.msginfo, name))
try:
abs_name = self._get_var_abs_name(name)
except KeyError:
sub = self.model._get_subsystem(name)
return sub is not None and sub._is_local
# variable exists, but may be remote
return abs_name in self.model._var_abs2meta['input'] or \
abs_name in self.model._var_abs2meta['output']
def _get_cached_val(self, name, get_remote=False):
# We have set and cached already
if name in self._initial_condition_cache:
return self._initial_condition_cache[name]
# Vector not setup, so we need to pull values from saved metadata request.
else:
proms = self.model._var_allprocs_prom2abs_list
meta = self.model._var_abs2meta
try:
conns = self.model._conn_abs_in2out
except AttributeError:
conns = {}
abs_names = name2abs_names(self.model, name)
if not abs_names:
raise KeyError('{}: Variable "{}" not found.'.format(self.model.msginfo, name))
abs_name = abs_names[0]
vars_to_gather = self._metadata['vars_to_gather']
io = 'output' if abs_name in meta['output'] else 'input'
if abs_name in meta[io]:
if abs_name in conns:
val = meta['output'][conns[abs_name]]['val']
else:
val = meta[io][abs_name]['val']
if get_remote and abs_name in vars_to_gather:
owner = vars_to_gather[abs_name]
if self.model.comm.rank == owner:
self.model.comm.bcast(val, root=owner)
else:
val = self.model.comm.bcast(None, root=owner)
if val is not _UNDEFINED:
# Need to cache the "get" in case the user calls in-place numpy operations.
self._initial_condition_cache[name] = val
return val
@property
def _recording_iter(self):
return self._metadata['recording_iter']
def __getitem__(self, name):
"""
Get an output/input variable.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
Returns
-------
float or ndarray or any python object
the requested output/input variable.
"""
return self.get_val(name, get_remote=None)
def get_val(self, name, units=None, indices=None, get_remote=False):
"""
Get an output/input variable.
Function is used if you want to specify display units.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
units : str, optional
Units to convert to before return.
indices : int or list of ints or tuple of ints or int ndarray or Iterable or None, optional
Indices or slice to return.
get_remote : bool or None
If True, retrieve the value even if it is on a remote process. Note that if the
variable is remote on ANY process, this function must be called on EVERY process
in the Problem's MPI communicator.
If False, only retrieve the value if it is on the current process, or only the part
of the value that's on the current process for a distributed variable.
If None and the variable is remote or distributed, a RuntimeError will be raised.
Returns
-------
object
The value of the requested output/input variable.
"""
if self._metadata['setup_status'] == _SetupStatus.POST_SETUP:
val = self._get_cached_val(name, get_remote=get_remote)
if val is not _UNDEFINED:
if indices is not None:
val = val[indices]
if units is not None:
val = self.model.convert2units(name, val, simplify_unit(units))
else:
val = self.model.get_val(name, units=units, indices=indices, get_remote=get_remote,
from_src=True)
if val is _UNDEFINED:
if get_remote:
raise KeyError('{}: Variable name "{}" not found.'.format(self.msginfo, name))
else:
raise RuntimeError(f"{self.model.msginfo}: Variable '{name}' is not local to "
f"rank {self.comm.rank}. You can retrieve values from "
"other processes using `get_val(<name>, get_remote=True)`.")
return val
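    # Illustrative sketch of the getter/setter API above (hypothetical variable
    # names 'x' and 'y'; not part of the original class):
    #
    #     prob.setup(); prob.run_model()
    #     y_km = prob.get_val('y', units='km')       # convert units on read
    #     y0   = prob.get_val('y', indices=[0])      # index into the value
    #     prob.set_val('x', 3.0, units='m')          # convert units on write
    #
    # Remote or distributed values need get_val(..., get_remote=True), called
    # on every rank in the Problem's communicator.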
def __setitem__(self, name, value):
"""
Set an output/input variable.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
value : float or ndarray or any python object
value to set this variable to.
"""
self.set_val(name, value)
def set_val(self, name, val=None, units=None, indices=None, **kwargs):
"""
Set an output/input variable.
Function is used if you want to set a value using a different unit.
Parameters
----------
name : str
Promoted or relative variable name in the root system's namespace.
val : float or ndarray or list or None
Value to set this variable to.
units : str, optional
Units that value is defined in.
indices : int or list of ints or tuple of ints or int ndarray or Iterable or None, optional
Indices or slice to set to specified value.
**kwargs : dict
Additional keyword argument for deprecated `value` arg.
"""
if 'value' not in kwargs:
value = None
elif 'value' in kwargs:
value = kwargs['value']
if value is not None and not self._warned:
self._warned = True
warn_deprecation(f"{self.msginfo} 'value' will be deprecated in 4.0. Please use 'val' "
"in the future.")
elif val is not None:
self._warned = True
value = val
model = self.model
if self._metadata is not None:
conns = model._conn_global_abs_in2out
else:
raise RuntimeError(f"{self.msginfo}: '{name}' Cannot call set_val before setup.")
all_meta = model._var_allprocs_abs2meta
loc_meta = model._var_abs2meta
n_proms = 0 # if nonzero, name given was promoted input name w/o a matching prom output
try:
ginputs = model._group_inputs
except AttributeError:
ginputs = {} # could happen if top level system is not a Group
abs_names = name2abs_names(model, name)
if abs_names:
n_proms = len(abs_names) # for output this will never be > 1
if n_proms > 1 and name in ginputs:
abs_name = ginputs[name][0].get('use_tgt', abs_names[0])
else:
abs_name = abs_names[0]
else:
raise KeyError(f'{model.msginfo}: Variable "{name}" not found.')
if abs_name in conns:
src = conns[abs_name]
if abs_name not in model._var_allprocs_discrete['input']:
                value = np.asarray(value)
#!/usr/bin/env python
# coding: utf-8
# !jupyter nbconvert --no-prompt --to=python deconv.ipynb
import numpy as np
from scipy.signal import convolve2d
from os import path, system
from astropy.io import fits
from numpy.fft import fft2, ifft2
from time import perf_counter
def psf_gaussian(npixel=0, ndimension=2, fwhm=0):
cntrd=np.array([(npixel-1)/2., (npixel-1)/2.])
x, y = np.meshgrid(np.arange(npixel)-cntrd[0], np.arange(npixel)-cntrd[1], sparse=False)
d = np.sqrt(x*x+y*y)
mu=0
sigma=fwhm/(2*(2*np.log(2))**(0.5))
psf= np.exp(-( 0.5*(d-mu)**2 / ( sigma**2 ) ) )
return (psf/np.sum(psf)).astype('float64')
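# Illustrative sketch (not part of the original script; size and FWHM are
# arbitrary): psf_gaussian returns a unit-sum kernel whose width follows
# sigma = fwhm / (2*sqrt(2*ln 2)) and whose peak sits at the grid centre.
def _psf_gaussian_example(npixel=65, fwhm=6.0):
    psf = psf_gaussian(npixel=npixel, fwhm=fwhm)
    assert abs(psf.sum() - 1.0) < 1e-10                     # normalized kernel
    peak = np.unravel_index(np.argmax(psf), psf.shape)
    assert peak == ((npixel - 1) // 2, (npixel - 1) // 2)   # centred peak
    return psf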
def arr_extension(arr, n_ext_max=999, minv=np.finfo('float64').eps):
meps=np.finfo('float64').eps
n_iter=1
ncomp=arr.size
# extension kernel in horizontal/vertical directions
ext_kernel=np.array([[0,1,0],[1,0,1],[0,1,0]])
# extension kernel in diagonal directions
ext_kernel_d=np.array([[1,0,1],[0,0,0],[1,0,1]])
while np.sum(arr != minv) != ncomp:
if n_iter > n_ext_max:
break
# mark only non-minimum values
non_min_mark=(arr != minv)*1
        # weight horizontal/vertical and diagonal directions differently
arr_ext=convolve2d(arr, ext_kernel+ext_kernel_d/2**0.5, mode='same')
# calculate normalization factor
norm_factor_sum=convolve2d(non_min_mark, ext_kernel+ext_kernel_d*8, mode='same')
norm_factor=norm_factor_sum % 8
norm_factor_d=norm_factor_sum // 8
replace_idx=np.nonzero((non_min_mark == 0) & (norm_factor > 0))
repcnt=len(replace_idx[0])
if repcnt > 0:
arr[replace_idx]=np.clip((arr_ext[replace_idx])/
(norm_factor[replace_idx]+norm_factor_d[replace_idx]/2**0.5),meps,None)
n_iter+=1
return arr.astype('float64')
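# Illustrative sketch (not part of the original script): arr_extension grows
# the valid region outward, replacing cells that still hold the sentinel
# minimum with weighted averages of their already-valid neighbours.
def _arr_extension_example():
    minv = np.finfo('float64').eps
    arr = np.full((5, 5), minv)
    arr[2, 2] = 1.0                       # single valid pixel in the centre
    filled = arr_extension(arr, minv=minv)
    assert np.all(filled != minv)         # every cell received a propagated value
    return filled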
def deconv(data,psf,psi,nit):
    # modified from the IDL routine "decon.pro" written by <NAME>
meps=np.finfo('float64').eps
minv=1e-10
dshape=data.shape
psfn=psf.copy()
ngidx=np.nonzero(psfn <= 0)
    if len(ngidx[0]) > 0:  # np.nonzero returns a tuple of index arrays
psfn[ngidx] = minv
#PSF Normalization
psfn=psfn/np.sum(psfn)
psfn = np.roll(psfn,(int(dshape[0]*0.5),int(dshape[1]*0.5)),(0,1))
norm=np.sum(data)
fpsf=(fft2(psfn))
for i in range(nit):
phi = (ifft2(fft2(psi)*fpsf)).astype('float64')
check_phi=(phi == 0.)
if np.sum(check_phi):
phi = phi+check_phi*meps
div=(data/phi)
        # Richardson-Lucy style update: correlate the ratio image with the PSF
        # (multiplication by the conjugate OTF in Fourier space).
        psi=psi*((ifft2(fft2(div)*np.conj(fpsf))).astype('float64'))
    # Flux renormalisation (assumed; `norm` above holds the total of the input data).
    psi=psi*norm/np.sum(psi)
    return psi
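# Illustrative end-to-end sketch (not part of the original script; image size,
# FWHM and iteration count are arbitrary): blur a toy image with the Gaussian
# PSF above, then recover it with deconv.
def _deconv_example(nit=20):
    truth = np.zeros((64, 64))
    truth[20:24, 30:34] = 1.0                         # a small bright box
    psf = psf_gaussian(npixel=64, fwhm=4.0)
    # Circular convolution with the kernel recentred at the origin, matching
    # the np.roll convention used inside deconv().
    blurred = np.real(ifft2(fft2(truth) * fft2(np.roll(psf, (32, 32), (0, 1)))))
    blurred = np.clip(blurred, 0, None)               # remove FFT round-off negatives
    psi0 = np.full_like(blurred, max(blurred.mean(), 1e-6))  # flat, positive first guess
    restored = deconv(blurred, psf, psi0, nit)
    return truth, blurred, restored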
"""
The :mod:`scikitplot.metrics` module includes plots for machine learning
evaluation metrics e.g. confusion matrix, silhouette scores, etc.
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import itertools
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import label_binarize
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import silhouette_score
from sklearn.metrics import silhouette_samples
from sklearn.calibration import calibration_curve
from scipy import interp
from scikitplot.helpers import binary_ks_curve, validate_labels
def plot_confusion_matrix(y_true, y_pred, labels=None, true_labels=None,
pred_labels=None, title=None, normalize=False,
hide_zeros=False, x_tick_rotation=0, ax=None,
figsize=None, cmap='Blues', title_fontsize="large",
text_fontsize="medium"):
"""Generates confusion matrix plot from predictions and true labels
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_pred (array-like, shape (n_samples)):
Estimated targets as returned by a classifier.
labels (array-like, shape (n_classes), optional): List of labels to
index the matrix. This may be used to reorder or select a subset
of labels. If none is given, those that appear at least once in
``y_true`` or ``y_pred`` are used in sorted order. (new in v0.2.5)
true_labels (array-like, optional): The true labels to display.
If none is given, then all of the labels are used.
pred_labels (array-like, optional): The predicted labels to display.
If none is given, then all of the labels are used.
title (string, optional): Title of the generated plot. Defaults to
"Confusion Matrix" if `normalize` is True. Else, defaults to
"Normalized Confusion Matrix.
normalize (bool, optional): If True, normalizes the confusion matrix
before plotting. Defaults to False.
hide_zeros (bool, optional): If True, does not plot cells containing a
value of zero. Defaults to False.
x_tick_rotation (int, optional): Rotates x-axis tick labels by the
specified angle. This is useful in cases where there are numerous
categories and the labels overlap each other.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> rf = RandomForestClassifier()
>>> rf = rf.fit(X_train, y_train)
>>> y_pred = rf.predict(X_test)
>>> skplt.metrics.plot_confusion_matrix(y_test, y_pred, normalize=True)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_confusion_matrix.png
:align: center
:alt: Confusion matrix
"""
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
cm = confusion_matrix(y_true, y_pred, labels=labels)
if labels is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(labels)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm = np.around(cm, decimals=2)
cm[np.isnan(cm)] = 0.0
if true_labels is None:
true_classes = classes
else:
validate_labels(classes, true_labels, "true_labels")
true_label_indexes = np.in1d(classes, true_labels)
true_classes = classes[true_label_indexes]
cm = cm[true_label_indexes]
if pred_labels is None:
pred_classes = classes
else:
validate_labels(classes, pred_labels, "pred_labels")
pred_label_indexes = np.in1d(classes, pred_labels)
pred_classes = classes[pred_label_indexes]
cm = cm[:, pred_label_indexes]
if title:
ax.set_title(title, fontsize=title_fontsize)
elif normalize:
ax.set_title('Normalized Confusion Matrix', fontsize=title_fontsize)
else:
ax.set_title('Confusion Matrix', fontsize=title_fontsize)
image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap(cmap))
plt.colorbar(mappable=image)
x_tick_marks = np.arange(len(pred_classes))
y_tick_marks = np.arange(len(true_classes))
ax.set_xticks(x_tick_marks)
ax.set_xticklabels(pred_classes, fontsize=text_fontsize,
rotation=x_tick_rotation)
ax.set_yticks(y_tick_marks)
ax.set_yticklabels(true_classes, fontsize=text_fontsize)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if not (hide_zeros and cm[i, j] == 0):
ax.text(j, i, cm[i, j],
horizontalalignment="center",
verticalalignment="center",
fontsize=text_fontsize,
color="white" if cm[i, j] > thresh else "black")
ax.set_ylabel('True label', fontsize=text_fontsize)
ax.set_xlabel('Predicted label', fontsize=text_fontsize)
ax.grid('off')
return ax
def plot_roc_curve(y_true, y_probas, title='ROC Curves',
curves=('micro', 'macro', 'each_class'),
ax=None, figsize=None, cmap='nipy_spectral',
title_fontsize="large", text_fontsize="medium"):
"""Generates the ROC curves from labels and predicted scores/probabilities
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_probas (array-like, shape (n_samples, n_classes)):
Prediction probabilities for each class returned by a classifier.
title (string, optional): Title of the generated plot. Defaults to
"ROC Curves".
curves (array-like): A listing of which curves should be plotted on the
resulting plot. Defaults to `("micro", "macro", "each_class")`
i.e. "micro" for micro-averaged curve, "macro" for macro-averaged
            curve, and "each_class" for the per-class curves.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot as skplt
>>> nb = GaussianNB()
>>> nb = nb.fit(X_train, y_train)
>>> y_probas = nb.predict_proba(X_test)
>>> skplt.metrics.plot_roc_curve(y_test, y_probas)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_roc_curve.png
:align: center
:alt: ROC Curves
"""
    y_true = np.array(y_true)
import os
import imageio
import numpy as np
import scipy
def load_images_from_folder(folder_name):
return list(
map(lambda image_name: (
image_name, imageio.imread(os.path.join(folder_name, image_name)) / 255),
os.listdir(folder_name)))
def prepare_input_data(database_folder='./images/database', ground_truth_folder='./images/ground_truth_augmented'):
"""
Loads images from input folders and groups them with their labels.
:param database_folder:
:param ground_truth_folder:
:return:
"""
def remove_svm_from_name(input):
name, data = input
return name.replace('_SVM', ''), data
output = []
input_images = load_images_from_folder(database_folder)
ground_truth = dict(map(remove_svm_from_name, load_images_from_folder(ground_truth_folder)))
for (image_name, image_data) in input_images:
image_output = ground_truth[image_name]
image_output = scipy.misc.imresize(image_output, (110,110, 3)) / 255
output.append(
{
'name': image_name,
'output': image_output,
'input': image_data
}
)
return output
def split_input_data(input_data):
"""
Splits the input data into training and test set using 70:30 ratio.
:param input_data: data to split tuple of (images,labels)
    :return: split data as a tuple of tuples: ((train_images, train_labels), (test_images, test_labels))
"""
images = [elem['input'] for elem in input_data]
labels = [elem['output'] for elem in input_data]
size = len(images)
train_part = int(size * 0.7)
train_images = np.array(images[:train_part])
    train_labels = np.array(labels[:train_part])
    # Remaining 30% form the test split (assumed, matching the docstring above).
    test_images = np.array(images[train_part:])
    test_labels = np.array(labels[train_part:])
    return (train_images, train_labels), (test_images, test_labels)
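# Illustrative sketch (not part of the original script; the default folders
# assumed by prepare_input_data must exist for this to run):
def _load_and_split_example():
    data = prepare_input_data()            # list of {'name', 'input', 'output'} dicts
    (train_x, train_y), (test_x, test_y) = split_input_data(data)
    print('train:', train_x.shape, 'test:', test_x.shape)
    return (train_x, train_y), (test_x, test_y)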
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import fitsio
import treecorr
from test_helper import assert_raises, do_pickle, timer, get_from_wiki, CaptureLog, clear_save
from test_helper import profile
def generate_shear_field(npos, nhalo, rng=None):
# We do something completely different here than we did for 2pt patch tests.
# A straight Gaussian field with a given power spectrum has no significant 3pt power,
# so it's not a great choice for simulating a field for 3pt tests.
# Instead we place N SIS "halos" randomly in the grid.
# Then we translate that to a shear field via FFT.
if rng is None:
rng = np.random.RandomState()
# Generate x,y values for the real-space field
x = rng.uniform(0,1000, size=npos)
y = rng.uniform(0,1000, size=npos)
nh = rng.poisson(nhalo)
# Fill the kappa values with SIS halo profiles.
xc = rng.uniform(0,1000, size=nh)
yc = rng.uniform(0,1000, size=nh)
scale = rng.uniform(20,50, size=nh)
mass = rng.uniform(0.01, 0.05, size=nh)
# Avoid making huge nhalo * nsource arrays. Loop in blocks of 64 halos
nblock = (nh-1) // 64 + 1
kappa = np.zeros_like(x)
gamma = np.zeros_like(x, dtype=complex)
for iblock in range(nblock):
i = iblock*64
j = (iblock+1)*64
dx = x[:,np.newaxis]-xc[np.newaxis,i:j]
dy = y[:,np.newaxis]-yc[np.newaxis,i:j]
dx[dx==0] = 1 # Avoid division by zero.
dy[dy==0] = 1
dx /= scale[i:j]
dy /= scale[i:j]
rsq = dx**2 + dy**2
r = rsq**0.5
k = mass[i:j] / r # "Mass" here is really just a dimensionless normalization propto mass.
kappa += np.sum(k, axis=1)
# gamma_t = kappa for SIS.
g = -k * (dx + 1j*dy)**2 / rsq
gamma += np.sum(g, axis=1)
return x, y, np.real(gamma), np.imag(gamma), kappa
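# Illustrative check (not part of the original tests; mass, scale and seed are
# arbitrary): for the SIS profile used in generate_shear_field, the tangential
# shear about the halo centre equals kappa, i.e.
# g_t = -Re[(g1 + 1j*g2) * exp(-2j*phi)] = k, with phi the position angle of
# the source relative to the halo.
def _check_sis_tangential_shear():
    rng = np.random.RandomState(42)
    dx = rng.uniform(-500, 500, 200)
    dy = rng.uniform(-500, 500, 200)
    scale, mass = 30., 0.03
    k = mass / np.sqrt((dx / scale)**2 + (dy / scale)**2)   # SIS "kappa"
    g = -k * (dx + 1j * dy)**2 / (dx**2 + dy**2)            # complex shear, as above
    phi = np.arctan2(dy, dx)
    g_t = -np.real(g * np.exp(-2j * phi))                   # tangential component
    np.testing.assert_allclose(g_t, k, rtol=1e-12)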
@timer
def test_kkk_jk():
# Test jackknife and other covariance estimates for kkk correlations.
# Note: This test takes a while!
# The main version I think is a pretty decent test of the code correctness.
# It shows that bootstrap in particular easily gets to within 50% of the right variance.
# Sometimes within 20%, but because of the randomness there, it varies a bit.
# Jackknife isn't much worse. Just a little below 50%. But still pretty good.
# Sample and Marked are not great for this test. I think they will work ok when the
# triangles of interest are mostly within single patches, but that's not the case we
# have here, and it would take a lot more points to get to that regime. So the
# accuracy tests for those two are pretty loose.
if __name__ == '__main__':
# This setup takes about 740 sec to run.
nhalo = 3000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 180 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 51 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 20 sec to run.
# So we use this one for regular unit test runs.
# It's pretty terrible in terms of testing the accuracy, but it works for code coverage.
# But whenever actually working on this part of the code, definitely need to switch
# to one of the above setups. Preferably run the name==main version to get a good
# test of the code correctness.
nhalo = 500
nsource = 500
npatch = 16
tol_factor = 4
file_name = 'data/test_kkk_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_kkks = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng1)
print(run,': ',np.mean(k),np.std(k))
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1)
kkk.process(cat)
print(kkk.ntri.ravel().tolist())
print(kkk.zeta.ravel().tolist())
all_kkks.append(kkk)
mean_kkk = np.mean([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
var_kkk = np.var([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
np.savez(file_name, all_kkk=np.array([kkk.zeta.ravel() for kkk in all_kkks]),
mean_kkk=mean_kkk, var_kkk=var_kkk)
data = np.load(file_name)
mean_kkk = data['mean_kkk']
var_kkk = data['var_kkk']
print('mean = ',mean_kkk)
print('var = ',var_kkk)
rng = np.random.RandomState(12345)
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
kkk.process(cat)
print(kkk.ntri.ravel())
print(kkk.zeta.ravel())
print(kkk.varzeta.ravel())
kkkp = kkk.copy()
catp = treecorr.Catalog(x=x, y=y, k=k, npatch=npatch)
# Do the same thing with patches.
kkkp.process(catp)
print('with patches:')
print(kkkp.ntri.ravel())
print(kkkp.zeta.ravel())
print(kkkp.varzeta.ravel())
np.testing.assert_allclose(kkkp.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(kkkp.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.6 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.5 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
kkkp.process(catp, catp, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Repeat this test with different combinations of patch with non-patch catalogs:
# All the methods work best when the patches are used for all 3 catalogs. But there
# are probably cases where this kind of cross correlation with only some catalogs having
# patches could be desired. So this mostly just checks that the code runs properly.
# Patch on 1 only:
print('with patches on 1 only:')
kkkp.process(catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
kkkp.process(cat, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.9 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
kkkp.process(cat, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
kkkp.process(catp, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.4*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
kkkp.process(cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
kkkp.process(catp, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Finally a set (with all patches) using the KKKCrossCorrelation class.
kkkc = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
print('CrossCorrelation:')
kkkc.process(catp, catp, catp)
for k1 in kkkc._all:
print(k1.ntri.ravel())
print(k1.zeta.ravel())
print(k1.varzeta.ravel())
np.testing.assert_allclose(k1.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(k1.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(k1.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkc.estimate_cov('jackknife')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkc.estimate_cov('sample')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkc.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkc.estimate_cov('bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
# All catalogs need to have the same number of patches
catq = treecorr.Catalog(x=x, y=y, k=k, npatch=2*npatch)
with assert_raises(RuntimeError):
kkkp.process(catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catp, catq, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catq, catp)
@timer
def test_ggg_jk():
# Test jackknife and other covariance estimates for ggg correlations.
if __name__ == '__main__':
# This setup takes about 590 sec to run.
nhalo = 5000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 160 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 50 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 13 sec to run.
nhalo = 500
nsource = 500
npatch = 8
tol_factor = 3
# I couldn't figure out a way to get reasonable S/N in the shear field. I thought doing
# discrete halos would give some significant 3pt shear pattern, at least for equilateral
# triangles, but the signal here is still consistent with zero. :(
    # The point is the variance, which is still calculated ok, but I would rather have
    # had something with S/N > 0.
# For these tests, I set up the binning to just accumulate all roughly equilateral triangles
# in a small separation range. The binning always uses two bins for each to get + and - v
# bins. So this function averages these two values to produce 1 value for each gamma.
f = lambda g: np.array([np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)])
file_name = 'data/test_ggg_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_gggs = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng1)
# For some reason std(g2) is coming out about 1.5x larger than std(g1).
# Probably a sign of some error in the generate function, but I don't see it.
# For this purpose I think it doesn't really matter, but it's a bit odd.
print(run,': ',np.mean(g1),np.std(g1),np.mean(g2),np.std(g2))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1)
ggg.process(cat)
print(ggg.ntri.ravel())
print(f(ggg))
all_gggs.append(ggg)
all_ggg = np.array([f(ggg) for ggg in all_gggs])
mean_ggg = np.mean(all_ggg, axis=0)
var_ggg = np.var(all_ggg, axis=0)
np.savez(file_name, mean_ggg=mean_ggg, var_ggg=var_ggg)
data = np.load(file_name)
mean_ggg = data['mean_ggg']
var_ggg = data['var_ggg']
print('mean = ',mean_ggg)
print('var = ',var_ggg)
rng = np.random.RandomState(12345)
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
ggg.process(cat)
print(ggg.ntri.ravel())
print(ggg.gam0.ravel())
print(ggg.gam1.ravel())
print(ggg.gam2.ravel())
print(ggg.gam3.ravel())
gggp = ggg.copy()
catp = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
# Do the same thing with patches.
gggp.process(catp)
print('with patches:')
print(gggp.ntri.ravel())
print(gggp.vargam0.ravel())
print(gggp.vargam1.ravel())
print(gggp.vargam2.ravel())
print(gggp.vargam3.ravel())
print(gggp.gam0.ravel())
print(gggp.gam1.ravel())
print(gggp.gam2.ravel())
print(gggp.gam3.ravel())
np.testing.assert_allclose(gggp.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.vargam0, ggg.vargam0, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam1, ggg.vargam1, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam2, ggg.vargam2, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam3, ggg.vargam3, rtol=0.1 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
gggp.process(catp, catp, catp)
print(gggp.gam0.ravel())
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
# The separate patch/non-patch combinations aren't that interesting, so skip them
# for GGG unless running from main.
if __name__ == '__main__':
# Patch on 1 only:
print('with patches on 1 only:')
gggp.process(catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
gggp.process(cat, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
gggp.process(cat, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
gggp.process(catp, catp, cat)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
gggp.process(cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=1.0*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
gggp.process(catp, cat, catp)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.7*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.5*tol_factor)
# Finally a set (with all patches) using the GGGCrossCorrelation class.
gggc = treecorr.GGGCrossCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
print('CrossCorrelation:')
gggc.process(catp, catp, catp)
for g in gggc._all:
print(g.ntri.ravel())
print(g.gam0.ravel())
print(g.vargam0.ravel())
np.testing.assert_allclose(g.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam0, ggg.vargam0, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam1, ggg.vargam1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam2, ggg.vargam2, rtol=0.05 * tol_factor)
np.testing.assert_allclose(g.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(g.vargam3, ggg.vargam3, rtol=0.05 * tol_factor)
fc = lambda gggc: np.concatenate([
[np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)]
for g in gggc._all])
print('jackknife:')
cov = gggc.estimate_cov('jackknife', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggc.estimate_cov('sample', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggc.estimate_cov('marked_bootstrap', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggc.estimate_cov('bootstrap', func=fc)
print(np.diagonal(cov).real)
for i in range(6):
v = np.diagonal(cov)[i*4:(i+1)*4]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_ggg))))
np.testing.assert_allclose(np.log(v), np.log(var_ggg), atol=0.3*tol_factor)
# Without func, don't check the accuracy, but make sure it returns something the right shape.
cov = gggc.estimate_cov('jackknife')
assert cov.shape == (48, 48)
@timer
def test_nnn_jk():
# Test jackknife and other covariance estimates for nnn correlations.
if __name__ == '__main__':
# This setup takes about 1200 sec to run.
nhalo = 300
nsource = 2000
npatch = 16
source_factor = 50
rand_factor = 3
tol_factor = 1
elif False:
# This setup takes about 250 sec to run.
nhalo = 200
nsource = 1000
npatch = 16
source_factor = 50
rand_factor = 2
tol_factor = 2
else:
# This setup takes about 44 sec to run.
nhalo = 100
nsource = 500
npatch = 8
source_factor = 30
rand_factor = 1
tol_factor = 3
file_name = 'data/test_nnn_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
rng = np.random.RandomState()
nruns = 1000
all_nnns = []
all_nnnc = []
t0 = time.time()
for run in range(nruns):
t2 = time.time()
x, y, _, _, k = generate_shear_field(nsource * source_factor, nhalo, rng)
p = k**3
p /= np.sum(p)
ns = rng.poisson(nsource)
select = rng.choice(range(len(x)), size=ns, replace=False, p=p)
print(run,': ',np.mean(k),np.std(k),np.min(k),np.max(k))
cat = treecorr.Catalog(x=x[select], y=y[select])
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
rx = rng.uniform(0,1000, rand_factor*nsource)
ry = rng.uniform(0,1000, rand_factor*nsource)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
rrr.process(rand_cat)
rdd = ddd.copy()
drr = ddd.copy()
ddd.process(cat)
rdd.process(rand_cat, cat)
drr.process(cat, rand_cat)
zeta_s, _ = ddd.calculateZeta(rrr)
zeta_c, _ = ddd.calculateZeta(rrr, drr, rdd)
print('simple: ',zeta_s.ravel())
print('compensated: ',zeta_c.ravel())
all_nnns.append(zeta_s.ravel())
all_nnnc.append(zeta_c.ravel())
t3 = time.time()
print('time: ',round(t3-t2),round((t3-t0)/60),round((t3-t0)*(nruns/(run+1)-1)/60))
mean_nnns = np.mean(all_nnns, axis=0)
var_nnns = np.var(all_nnns, axis=0)
mean_nnnc = np.mean(all_nnnc, axis=0)
var_nnnc = np.var(all_nnnc, axis=0)
np.savez(file_name, mean_nnns=mean_nnns, var_nnns=var_nnns,
mean_nnnc=mean_nnnc, var_nnnc=var_nnnc)
data = np.load(file_name)
mean_nnns = data['mean_nnns']
var_nnns = data['var_nnns']
mean_nnnc = data['mean_nnnc']
var_nnnc = data['var_nnnc']
print('mean simple = ',mean_nnns)
print('var simple = ',var_nnns)
print('mean compensated = ',mean_nnnc)
print('var compensated = ',var_nnnc)
# Make a random catalog with rand_factor times as many sources, randomly distributed.
rng = np.random.RandomState(1234)
rx = rng.uniform(0,1000, rand_factor*nsource)
ry = rng.uniform(0,1000, rand_factor*nsource)
rand_cat = treecorr.Catalog(x=rx, y=ry)
rrr = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
t0 = time.time()
rrr.process(rand_cat)
t1 = time.time()
print('Time to process rand cat = ',t1-t0)
print('RRR:',rrr.tot)
print(rrr.ntri.ravel())
# Make the data catalog
x, y, _, _, k = generate_shear_field(nsource * source_factor, nhalo, rng=rng)
print('mean k = ',np.mean(k))
print('min,max = ',np.min(k),np.max(k))
p = k**3
p /= np.sum(p)
select = rng.choice(range(len(x)), size=nsource, replace=False, p=p)
cat = treecorr.Catalog(x=x[select], y=y[select])
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rdd = ddd.copy()
drr = ddd.copy()
ddd.process(cat)
rdd.process(rand_cat, cat)
drr.process(cat, rand_cat)
zeta_s1, var_zeta_s1 = ddd.calculateZeta(rrr)
zeta_c1, var_zeta_c1 = ddd.calculateZeta(rrr, drr, rdd)
print('DDD:',ddd.tot)
print(ddd.ntri.ravel())
print('simple: ')
print(zeta_s1.ravel())
print(var_zeta_s1.ravel())
print('DRR:',drr.tot)
print(drr.ntri.ravel())
print('RDD:',rdd.tot)
print(rdd.ntri.ravel())
print('compensated: ')
print(zeta_c1.ravel())
print(var_zeta_c1.ravel())
# Make the patches with a large random catalog to make sure the patches are uniform area.
big_rx = rng.uniform(0,1000, 100*nsource)
big_ry = rng.uniform(0,1000, 100*nsource)
big_catp = treecorr.Catalog(x=big_rx, y=big_ry, npatch=npatch, rng=rng)
patch_centers = big_catp.patch_centers
# Do the same thing with patches on D, but not yet on R.
dddp = treecorr.NNNCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rddp = dddp.copy()
drrp = dddp.copy()
catp = treecorr.Catalog(x=x[select], y=y[select], patch_centers=patch_centers)
print('Patch\tNtot')
for p in catp.patches:
print(p.patch,'\t',p.ntot,'\t',patch_centers[p.patch])
print('with patches on D:')
dddp.process(catp)
rddp.process(rand_cat, catp)
drrp.process(catp, rand_cat)
# Need to run calculateZeta to get patch-based covariance
with assert_raises(RuntimeError):
dddp.estimate_cov('jackknife')
zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrr)
print('DDD:',dddp.tot)
print(dddp.ntri.ravel())
print('simple: ')
print(zeta_s2.ravel())
print(var_zeta_s2.ravel())
np.testing.assert_allclose(zeta_s2, zeta_s1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(var_zeta_s2, var_zeta_s1, rtol=0.05 * tol_factor)
# Check the _calculate_xi_from_pairs function. Using all pairs, should get total xi.
ddd1 = dddp.copy()
ddd1._calculate_xi_from_pairs(dddp.results.keys())
np.testing.assert_allclose(ddd1.zeta, dddp.zeta)
# None of these are very good without the random catalog using patches.
# I think this is basically just that the approximations used for estimating the area_frac
# to figure out the appropriate altered RRR counts aren't accurate enough when the total
# counts are as low as this. I think (hope) that it should be semi-ok when N is much larger,
# but this is probably saying that for 3pt, using patches for R is even more important than
# for 2pt.
# Of course, it could also be that this is telling me I still have a bug somewhere that I
# haven't managed to find... :(
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.3*tol_factor)
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor)
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.3*tol_factor)
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.2*tol_factor)
zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrr, drrp, rddp)
print('compensated: ')
print('DRR:',drrp.tot)
print(drrp.ntri.ravel())
print('RDD:',rddp.tot)
print(rddp.ntri.ravel())
print(zeta_c2.ravel())
print(var_zeta_c2.ravel())
np.testing.assert_allclose(zeta_c2, zeta_c1, rtol=0.05 * tol_factor, atol=1.e-3 * tol_factor)
np.testing.assert_allclose(var_zeta_c2, var_zeta_c1, rtol=0.05 * tol_factor)
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.6*tol_factor)
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=3.8*tol_factor)
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.3*tol_factor)
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=2.6*tol_factor)
# Now with the random also using patches
# These are a lot better than the above tests. But still not nearly as good as we were able
# to get in 2pt. I'm pretty sure this is just due to the fact that we need to have much
# smaller catalogs to make it feasible to run this in a reasonable amount of time. I don't
# think this is a sign of any bug in the code.
print('with patched random catalog:')
rand_catp = treecorr.Catalog(x=rx, y=ry, patch_centers=patch_centers)
rrrp = rrr.copy()
rrrp.process(rand_catp)
drrp.process(catp, rand_catp)
rddp.process(rand_catp, catp)
print('simple: ')
zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrrp)
print('DDD:',dddp.tot)
print(dddp.ntri.ravel())
print(zeta_s2.ravel())
print(var_zeta_s2.ravel())
np.testing.assert_allclose(zeta_s2, zeta_s1, rtol=0.05 * tol_factor)
np.testing.assert_allclose(var_zeta_s2, var_zeta_s1, rtol=0.05 * tol_factor)
ddd1 = dddp.copy()
ddd1._calculate_xi_from_pairs(dddp.results.keys())
np.testing.assert_allclose(ddd1.zeta, dddp.zeta)
t0 = time.time()
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.7*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.0*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('compensated: ')
zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrrp, drrp, rddp)
print('DRR:',drrp.tot)
print(drrp.ntri.ravel())
print('RDD:',rddp.tot)
print(rddp.ntri.ravel())
print(zeta_c2.ravel())
print(var_zeta_c2.ravel())
np.testing.assert_allclose(zeta_c2, zeta_c1, rtol=0.05 * tol_factor, atol=1.e-3 * tol_factor)
np.testing.assert_allclose(var_zeta_c2, var_zeta_c1, rtol=0.05 * tol_factor)
t0 = time.time()
print('jackknife:')
cov = dddp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('sample:')
cov = dddp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('marked:')
cov = dddp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
print('bootstrap:')
cov = dddp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnnc))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnnc), atol=0.8*tol_factor)
t1 = time.time()
print('t = ',t1-t0)
t0 = time.time()
# I haven't implemented calculateZeta for the NNNCrossCorrelation class, because I'm not
# actually sure what the right thing to do here is for calculating a single zeta vector.
# Do we do a different one for each of the 6 permutations? Or one overall one?
# So rather than just do something, I'll wait until someone has a coherent use case where
# they want this and can explain exactly what the right thing to compute is.
# So to just exercise the machinery with NNNCrossCorrelation, I'm using a func parameter
# to compute something equivalent to the simple zeta calculation.
dddc = treecorr.NNNCrossCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1, rng=rng)
rrrc = treecorr.NNNCrossCorrelation(nbins=3, min_sep=50., max_sep=100., bin_slop=0.2,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.2, nvbins=1)
print('CrossCorrelation:')
dddc.process(catp, catp, catp)
rrrc.process(rand_catp, rand_catp, rand_catp)
def cc_zeta(corrs):
d, r = corrs
d1 = d.n1n2n3.copy()
d1._sum(d._all)
r1 = r.n1n2n3.copy()
r1._sum(r._all)
zeta, _ = d1.calculateZeta(r1)
return zeta.ravel()
print('simple: ')
zeta_s3 = cc_zeta([dddc, rrrc])
print(zeta_s3)
np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor)
print('jackknife:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
print('sample:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor)
print('marked:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor)
print('bootstrap:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor)
# Repeat with a 1-2 cross-correlation
print('CrossCorrelation 1-2:')
dddc.process(catp, catp)
rrrc.process(rand_catp, rand_catp)
print('simple: ')
zeta_s3 = cc_zeta([dddc, rrrc])
print(zeta_s3)
np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor)
print('jackknife:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor)
print('sample:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.1*tol_factor)
print('marked:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor)
print('bootstrap:')
cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta)
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor)
@timer
def test_brute_jk():
# With bin_slop = 0, the jackknife calculation from patches should match a
# brute force calculation where we literally remove one patch at a time to make
# the vectors.
if __name__ == '__main__':
nhalo = 100
ngal = 500
npatch = 16
rand_factor = 5
else:
nhalo = 100
ngal = 30
npatch = 16
rand_factor = 2
rng = np.random.RandomState(8675309)
x, y, g1, g2, k = generate_shear_field(ngal, nhalo, rng)
rx = rng.uniform(0,1000, rand_factor*ngal)
ry = rng.uniform(0,1000, rand_factor*ngal)
rand_cat_nopatch = treecorr.Catalog(x=rx, y=ry)
rand_cat = treecorr.Catalog(x=rx, y=ry, npatch=npatch, rng=rng)
patch_centers = rand_cat.patch_centers
cat_nopatch = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, k=k, patch_centers=patch_centers)
print('cat patches = ',np.unique(cat.patch))
print('len = ',cat.nobj, cat.ntot)
assert cat.nobj == ngal
print('Patch\tNtot')
for p in cat.patches:
print(p.patch,'\t',p.ntot,'\t',patch_centers[p.patch])
# Start with KKK, since relatively simple.
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
kkk1.process(cat_nopatch)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
kkk.process(cat)
np.testing.assert_allclose(kkk.zeta, kkk1.zeta)
kkk_zeta_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
kkk1.process(cat1)
print('zeta = ',kkk1.zeta.ravel())
kkk_zeta_list.append(kkk1.zeta.ravel())
kkk_zeta_list = np.array(kkk_zeta_list)
cov = np.cov(kkk_zeta_list.T, bias=True) * (len(kkk_zeta_list)-1)
varzeta = np.diagonal(np.cov(kkk_zeta_list.T, bias=True)) * (len(kkk_zeta_list)-1)
print('KKK: treecorr jackknife varzeta = ',kkk.varzeta.ravel())
print('KKK: direct jackknife varzeta = ',varzeta)
np.testing.assert_allclose(kkk.varzeta.ravel(), varzeta)
# Now GGG
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
ggg1.process(cat_nopatch)
ggg = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
ggg.process(cat)
np.testing.assert_allclose(ggg.gam0, ggg1.gam0)
np.testing.assert_allclose(ggg.gam1, ggg1.gam1)
np.testing.assert_allclose(ggg.gam2, ggg1.gam2)
np.testing.assert_allclose(ggg.gam3, ggg1.gam3)
ggg_gam0_list = []
ggg_gam1_list = []
ggg_gam2_list = []
ggg_gam3_list = []
ggg_map3_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=100., max_sep=300., brute=True,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
ggg1.process(cat1)
ggg_gam0_list.append(ggg1.gam0.ravel())
ggg_gam1_list.append(ggg1.gam1.ravel())
ggg_gam2_list.append(ggg1.gam2.ravel())
ggg_gam3_list.append(ggg1.gam3.ravel())
ggg_map3_list.append(ggg1.calculateMap3()[0])
ggg_gam0_list = np.array(ggg_gam0_list)
vargam0 = np.diagonal(np.cov(ggg_gam0_list.T, bias=True)) * (len(ggg_gam0_list)-1)
print('GGG: treecorr jackknife vargam0 = ',ggg.vargam0.ravel())
print('GGG: direct jackknife vargam0 = ',vargam0)
np.testing.assert_allclose(ggg.vargam0.ravel(), vargam0)
ggg_gam1_list = np.array(ggg_gam1_list)
vargam1 = np.diagonal(np.cov(ggg_gam1_list.T, bias=True)) * (len(ggg_gam1_list)-1)
print('GGG: treecorr jackknife vargam1 = ',ggg.vargam1.ravel())
print('GGG: direct jackknife vargam1 = ',vargam1)
np.testing.assert_allclose(ggg.vargam1.ravel(), vargam1)
ggg_gam2_list = np.array(ggg_gam2_list)
vargam2 = np.diagonal(np.cov(ggg_gam2_list.T, bias=True)) * (len(ggg_gam2_list)-1)
print('GGG: treecorr jackknife vargam2 = ',ggg.vargam2.ravel())
print('GGG: direct jackknife vargam2 = ',vargam2)
np.testing.assert_allclose(ggg.vargam2.ravel(), vargam2)
ggg_gam3_list = np.array(ggg_gam3_list)
vargam3 = np.diagonal(np.cov(ggg_gam3_list.T, bias=True)) * (len(ggg_gam3_list)-1)
print('GGG: treecorr jackknife vargam3 = ',ggg.vargam3.ravel())
print('GGG: direct jackknife vargam3 = ',vargam3)
np.testing.assert_allclose(ggg.vargam3.ravel(), vargam3)
ggg_map3_list = np.array(ggg_map3_list)
varmap3 = np.diagonal(np.cov(ggg_map3_list.T, bias=True)) * (len(ggg_map3_list)-1)
covmap3 = treecorr.estimate_multi_cov([ggg], 'jackknife',
lambda corrs: corrs[0].calculateMap3()[0])
print('GGG: treecorr jackknife varmap3 = ',np.diagonal(covmap3))
print('GGG: direct jackknife varmap3 = ',varmap3)
np.testing.assert_allclose(np.diagonal(covmap3), varmap3)
# Finally NNN, where we need to use randoms. Both simple and compensated.
ddd = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1,
var_method='jackknife')
drr = ddd.copy()
rdd = ddd.copy()
rrr = ddd.copy()
ddd.process(cat)
drr.process(cat, rand_cat)
rdd.process(rand_cat, cat)
rrr.process(rand_cat)
zeta1_list = []
zeta2_list = []
for i in range(npatch):
cat1 = treecorr.Catalog(x=cat.x[cat.patch != i],
y=cat.y[cat.patch != i],
k=cat.k[cat.patch != i],
g1=cat.g1[cat.patch != i],
g2=cat.g2[cat.patch != i])
rand_cat1 = treecorr.Catalog(x=rand_cat.x[rand_cat.patch != i],
y=rand_cat.y[rand_cat.patch != i])
ddd1 = treecorr.NNNCorrelation(nbins=3, min_sep=100., max_sep=300., bin_slop=0,
min_u=0., max_u=1.0, nubins=1,
min_v=0., max_v=1.0, nvbins=1)
drr1 = ddd1.copy()
rdd1 = ddd1.copy()
rrr1 = ddd1.copy()
ddd1.process(cat1)
drr1.process(cat1, rand_cat1)
rdd1.process(rand_cat1, cat1)
rrr1.process(rand_cat1)
zeta1_list.append(ddd1.calculateZeta(rrr1)[0].ravel())
zeta2_list.append(ddd1.calculateZeta(rrr1, drr1, rdd1)[0].ravel())
print('simple')
zeta1_list = np.array(zeta1_list)
zeta2, varzeta2 = ddd.calculateZeta(rrr)
varzeta1 = np.diagonal(np.cov(zeta1_list.T, bias=True)) * (len(zeta1_list)-1)
print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel())
print('NNN: direct jackknife varzeta = ',varzeta1)
np.testing.assert_allclose(ddd.varzeta.ravel(), varzeta1)
print('compensated')
print(zeta2_list)
zeta2_list = np.array(zeta2_list)
zeta2, varzeta2 = ddd.calculateZeta(rrr, drr=drr, rdd=rdd)
varzeta2 = np.diagonal(np.cov(zeta2_list.T, bias=True)) * (len(zeta2_list)-1)
print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel())
print('NNN: direct jackknife varzeta = ',varzeta2)
np.testing.assert_allclose(ddd.varzeta.ravel(), varzeta2)
# Can't do patch calculation with different numbers of patches in rrr, drr, rdd.
rand_cat3 = treecorr.Catalog(x=rx, y=ry, npatch=3)
cat3 = treecorr.Catalog(x=x, y=y, patch_centers=rand_cat3.patch_centers)
rrr3 = rrr.copy()
drr3 = drr.copy()
rdd3 = rdd.copy()
rrr3.process(rand_cat3)
drr3.process(cat3, rand_cat3)
rdd3.process(rand_cat3, cat3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr3, drr, rdd)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr3, rdd3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr, rdd3)
with assert_raises(RuntimeError):
ddd.calculateZeta(rrr, drr3, rdd)
@timer
def test_finalize_false():
nsource = 80
nhalo = 100
npatch = 16
# Make three independent data sets
rng = np.random.RandomState(8675309)
x_1, y_1, g1_1, g2_1, k_1 = generate_shear_field(nsource, nhalo, rng)
x_2, y_2, g1_2, g2_2, k_2 = generate_shear_field(nsource, nhalo, rng)
x_3, y_3, g1_3, g2_3, k_3 = generate_shear_field(nsource, nhalo, rng)
# Make a single catalog with all three together
cat = treecorr.Catalog(x=np.concatenate([x_1, x_2, x_3]),
y=np.concatenate([y_1, y_2, y_3]),
g1=np.concatenate([g1_1, g1_2, g1_3]),
g2=np.concatenate([g2_1, g2_2, g2_3]),
k=np.concatenate([k_1, k_2, k_3]),
npatch=npatch)
# Now the three separately, using the same patch centers
cat1 = treecorr.Catalog(x=x_1, y=y_1, g1=g1_1, g2=g2_1, k=k_1, patch_centers=cat.patch_centers)
cat2 = treecorr.Catalog(x=x_2, y=y_2, g1=g1_2, g2=g2_2, k=k_2, patch_centers=cat.patch_centers)
cat3 = treecorr.Catalog(x=x_3, y=y_3, g1=g1_3, g2=g2_3, k=k_3, patch_centers=cat.patch_centers)
np.testing.assert_array_equal(cat1.patch, cat.patch[0:nsource])
np.testing.assert_array_equal(cat2.patch, cat.patch[nsource:2*nsource])
np.testing.assert_array_equal(cat3.patch, cat.patch[2*nsource:3*nsource])
# KKK auto
kkk1 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkk1.process(cat)
kkk2 = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkk2.process(cat1, initialize=True, finalize=False)
kkk2.process(cat2, initialize=False, finalize=False)
kkk2.process(cat3, initialize=False, finalize=False)
kkk2.process(cat1, cat2, initialize=False, finalize=False)
kkk2.process(cat1, cat3, initialize=False, finalize=False)
kkk2.process(cat2, cat1, initialize=False, finalize=False)
kkk2.process(cat2, cat3, initialize=False, finalize=False)
kkk2.process(cat3, cat1, initialize=False, finalize=False)
kkk2.process(cat3, cat2, initialize=False, finalize=False)
kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKK cross12
cat23 = treecorr.Catalog(x=np.concatenate([x_2, x_3]),
y=np.concatenate([y_2, y_3]),
g1=np.concatenate([g1_2, g1_3]),
g2=np.concatenate([g2_2, g2_3]),
k=np.concatenate([k_2, k_3]),
patch_centers=cat.patch_centers)
np.testing.assert_array_equal(cat23.patch, cat.patch[nsource:3*nsource])
kkk1.process(cat1, cat23)
kkk2.process(cat1, cat2, initialize=True, finalize=False)
kkk2.process(cat1, cat3, initialize=False, finalize=False)
kkk2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKKCross cross12
kkkc1 = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkkc1.process(cat1, cat23)
kkkc2 = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
kkkc2.process(cat1, cat2, initialize=True, finalize=False)
kkkc2.process(cat1, cat3, initialize=False, finalize=False)
kkkc2.process(cat1, cat2, cat3, initialize=False, finalize=True)
for perm in ['k1k2k3', 'k1k3k2', 'k2k1k3', 'k2k3k1', 'k3k1k2', 'k3k2k1']:
kkk1 = getattr(kkkc1, perm)
kkk2 = getattr(kkkc2, perm)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKK cross
kkk1.process(cat, cat2, cat3)
kkk2.process(cat1, cat2, cat3, initialize=True, finalize=False)
kkk2.process(cat2, cat2, cat3, initialize=False, finalize=False)
kkk2.process(cat3, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# KKKCross cross
kkkc1.process(cat, cat2, cat3)
kkkc2.process(cat1, cat2, cat3, initialize=True, finalize=False)
kkkc2.process(cat2, cat2, cat3, initialize=False, finalize=False)
kkkc2.process(cat3, cat2, cat3, initialize=False, finalize=True)
for perm in ['k1k2k3', 'k1k3k2', 'k2k1k3', 'k2k3k1', 'k3k1k2', 'k3k2k1']:
kkk1 = getattr(kkkc1, perm)
kkk2 = getattr(kkkc2, perm)
np.testing.assert_allclose(kkk1.ntri, kkk2.ntri)
np.testing.assert_allclose(kkk1.weight, kkk2.weight)
np.testing.assert_allclose(kkk1.meand1, kkk2.meand1)
np.testing.assert_allclose(kkk1.meand2, kkk2.meand2)
np.testing.assert_allclose(kkk1.meand3, kkk2.meand3)
np.testing.assert_allclose(kkk1.zeta, kkk2.zeta)
# GGG auto
ggg1 = treecorr.GGGCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
ggg1.process(cat)
ggg2 = treecorr.GGGCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
ggg2.process(cat1, initialize=True, finalize=False)
ggg2.process(cat2, initialize=False, finalize=False)
ggg2.process(cat3, initialize=False, finalize=False)
ggg2.process(cat1, cat2, initialize=False, finalize=False)
ggg2.process(cat1, cat3, initialize=False, finalize=False)
ggg2.process(cat2, cat1, initialize=False, finalize=False)
ggg2.process(cat2, cat3, initialize=False, finalize=False)
ggg2.process(cat3, cat1, initialize=False, finalize=False)
ggg2.process(cat3, cat2, initialize=False, finalize=False)
ggg2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
np.testing.assert_allclose(ggg1.weight, ggg2.weight)
np.testing.assert_allclose(ggg1.meand1, ggg2.meand1)
np.testing.assert_allclose(ggg1.meand2, ggg2.meand2)
np.testing.assert_allclose(ggg1.meand3, ggg2.meand3)
np.testing.assert_allclose(ggg1.gam0, ggg2.gam0)
np.testing.assert_allclose(ggg1.gam1, ggg2.gam1)
np.testing.assert_allclose(ggg1.gam2, ggg2.gam2)
np.testing.assert_allclose(ggg1.gam3, ggg2.gam3)
# GGG cross12
ggg1.process(cat1, cat23)
ggg2.process(cat1, cat2, initialize=True, finalize=False)
ggg2.process(cat1, cat3, initialize=False, finalize=False)
ggg2.process(cat1, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
np.testing.assert_allclose(ggg1.weight, ggg2.weight)
np.testing.assert_allclose(ggg1.meand1, ggg2.meand1)
np.testing.assert_allclose(ggg1.meand2, ggg2.meand2)
np.testing.assert_allclose(ggg1.meand3, ggg2.meand3)
np.testing.assert_allclose(ggg1.gam0, ggg2.gam0)
np.testing.assert_allclose(ggg1.gam1, ggg2.gam1)
np.testing.assert_allclose(ggg1.gam2, ggg2.gam2)
np.testing.assert_allclose(ggg1.gam3, ggg2.gam3)
# GGGCross cross12
gggc1 = treecorr.GGGCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
gggc1.process(cat1, cat23)
gggc2 = treecorr.GGGCrossCorrelation(nbins=3, min_sep=30., max_sep=100., brute=True,
min_u=0.8, max_u=1.0, nubins=1,
min_v=0., max_v=0.2, nvbins=1)
gggc2.process(cat1, cat2, initialize=True, finalize=False)
gggc2.process(cat1, cat3, initialize=False, finalize=False)
gggc2.process(cat1, cat2, cat3, initialize=False, finalize=True)
for perm in ['g1g2g3', 'g1g3g2', 'g2g1g3', 'g2g3g1', 'g3g1g2', 'g3g2g1']:
ggg1 = getattr(gggc1, perm)
ggg2 = getattr(gggc2, perm)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
np.testing.assert_allclose(ggg1.weight, ggg2.weight)
np.testing.assert_allclose(ggg1.meand1, ggg2.meand1)
np.testing.assert_allclose(ggg1.meand2, ggg2.meand2)
np.testing.assert_allclose(ggg1.meand3, ggg2.meand3)
np.testing.assert_allclose(ggg1.gam0, ggg2.gam0)
np.testing.assert_allclose(ggg1.gam1, ggg2.gam1)
np.testing.assert_allclose(ggg1.gam2, ggg2.gam2)
np.testing.assert_allclose(ggg1.gam3, ggg2.gam3)
# GGG cross
ggg1.process(cat, cat2, cat3)
ggg2.process(cat1, cat2, cat3, initialize=True, finalize=False)
ggg2.process(cat2, cat2, cat3, initialize=False, finalize=False)
ggg2.process(cat3, cat2, cat3, initialize=False, finalize=True)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
np.testing.assert_allclose(ggg1.weight, ggg2.weight)
np.testing.assert_allclose(ggg1.meand1, ggg2.meand1)
np.testing.assert_allclose(ggg1.meand2, ggg2.meand2)
np.testing.assert_allclose(ggg1.meand3, ggg2.meand3)
np.testing.assert_allclose(ggg1.gam0, ggg2.gam0)
np.testing.assert_allclose(ggg1.gam1, ggg2.gam1)
np.testing.assert_allclose(ggg1.gam2, ggg2.gam2)
np.testing.assert_allclose(ggg1.gam3, ggg2.gam3)
# GGGCross cross
gggc1.process(cat, cat2, cat3)
gggc2.process(cat1, cat2, cat3, initialize=True, finalize=False)
gggc2.process(cat2, cat2, cat3, initialize=False, finalize=False)
gggc2.process(cat3, cat2, cat3, initialize=False, finalize=True)
for perm in ['g1g2g3', 'g1g3g2', 'g2g1g3', 'g2g3g1', 'g3g1g2', 'g3g2g1']:
ggg1 = getattr(gggc1, perm)
ggg2 = getattr(gggc2, perm)
np.testing.assert_allclose(ggg1.ntri, ggg2.ntri)
np.testing.assert_allclose(ggg1.weight, ggg2.weight)
np.testing.assert_allclose(ggg1.meand1, ggg2.meand1)
np.testing.assert_allclose(ggg1.meand2, ggg2.meand2)
np.testing.assert_allclose(ggg1.meand3, ggg2.meand3)
np.testing.assert_allclose(ggg1.gam0, ggg2.gam0)  # api: numpy.testing.assert_allclose
#!/usr/bin/env python
# ===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import unittest
import numpy.testing as npt
import numpy
from eotools.bulk_stats import bulk_stats
from scipy import stats
class TestStats(unittest.TestCase):
"""
Unit tests for the bulk_stats function.
"""
def setUp(self):
self.data = numpy.random.ranf((10, 100, 100))
self.result = bulk_stats(self.data, double=True)
def test_mean(self):
"""
Test that the mean value is the same.
"""
control = numpy.mean(self.data, axis=0)
npt.assert_allclose(control, self.result[1])  # api: numpy.testing.assert_allclose
from __future__ import division
import numpy as np
from numpy.testing import assert_allclose, assert_, assert_equal, assert_raises
from nose import SkipTest
from .. import SuperSmoother, SuperSmootherMultiband
def _generate_data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.RandomState(rseed)
t = 10 * period * rng.rand(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.rand(N))
y += dy * rng.randn(N)
return t, y, dy
def test_supersmoother(N=100, period=1):
t, y, dy = _generate_data(N, period)
model = SuperSmoother().fit(t, y, dy)
model.optimizer.period_range = (period / 1.1, period * 1.1)
model.optimizer.final_pass_coverage = 0
assert_allclose(period, model.best_period, rtol=0.02)
def test_supersmoother_dy_scalar(N=100, period=1):
t, y, dy = _generate_data(N, period)
# Make dy array all the same
dy[:] = dy.mean()
periods = np.linspace(period / 2, period * 2, 100)
assert_equal(SuperSmoother().fit(t, y, dy).score(periods),
SuperSmoother().fit(t, y, dy[0]).score(periods))
def test_supersmoother_dy_None(N=100, period=1):
t, y, dy = _generate_data(N, period)
periods = np.linspace(period / 2, period * 2, 100)
assert_equal(SuperSmoother().fit(t, y, 1).score(periods),
SuperSmoother().fit(t, y).score(periods))
def test_supersmoother_multiband(N=100, period=1):
"""Test that results are the same with/without filter labels"""
t, y, dy = _generate_data(N, period)
periods = np.linspace(period / 2, period * 2, 100)  # api: numpy.linspace
import _pickle as cPickle
import collections
import operator
import os
import pdb
import random
from functools import reduce
from typing import List
import h5py
import joblib
import matplotlib.pyplot as plt
import numpy as np
from skimage.util import view_as_windows
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.layers import Dense, Conv2D, Dropout, \
Flatten, MaxPooling2D, BatchNormalization
from tensorflow.python.keras.losses import categorical_crossentropy
from tensorflow.python.keras.models import Sequential, load_model
from tensorflow.python.keras.optimizers import SGD
from ..preprocess.convert import EmptySliderFrameConverter
from .generator import HDF5DataGenerator
from .. import (
utils,
slider,
timing,
db,
definitions,
hyper_params
)
from . import data_utils
DIM_X = 69
DIM_Y = 41
DIM_Z = 1
seq_len = hyper_params.sample_frames
hidden_units = 128
n_mels = hyper_params.n_mels
n_fft = hyper_params.n_fft
hop_len = hyper_params.hop_length
dataset_file = 'train_inputs'
model_file = 'model_hits.test'
scaler_file = 'scaler_hits'
def flatten_array(t):
for x in t:
if type(x) == dict \
or type(x) == tuple \
or not isinstance(x, collections.Iterable):
yield x
else:
yield from flatten_array(x)
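# Minimal usage sketch for flatten_array (the _demo_* helper below is illustrative only and
# not referenced by the training pipeline): nested lists are unrolled recursively, while
# dicts, tuples and scalars are yielded as-is, which is why (input, label) pairs survive
# flattening intact.
def _demo_flatten_array():
    """list(flatten_array([[(1, 0), (2, 1)], [(3, 0)], 4])) == [(1, 0), (2, 1), (3, 0), 4]"""
    nested = [[(1, 0), (2, 1)], [(3, 0)], 4]
    return list(flatten_array(nested))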
def get_scaler(path):
_scaler = joblib.load(path)
return _scaler
def get_model(path):
_model = load_model(path)
return _model
def get_sequences(melgram, _n_mels):
"""
Transform 2 or 3D mel spectrogram into 3D array of windows
:param melgram: 2 or 3D mel spectrogram (if 3D, squeezed to 2D)
:param _n_mels: number of mel buckets
:return: array of sequences. shape (n_frames, seq_len, n_mels)
"""
if len(melgram.shape) == 3:
melgram = np.squeeze(melgram)
sequences = view_as_windows(melgram, window_shape=(seq_len, _n_mels))
sequences = np.squeeze(sequences, 1)
return sequences
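# Shape sketch for the windowing performed by get_sequences (illustrative only; the sizes
# below are placeholders, not the project's hyper_params values).
def _demo_get_sequences_shapes(n_frames=100, window=16, mels=40):
    """view_as_windows turns (n_frames, mels) into (n_frames - window + 1, window, mels)."""
    mel = np.zeros((n_frames, mels))
    seqs = np.squeeze(view_as_windows(mel, window_shape=(window, mels)), 1)
    assert seqs.shape == (n_frames - window + 1, window, mels)
    return seqs.shape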
preprocess_args = {
'cut_beginning': False,
'seq_len': seq_len,
'beatmap_id': '405096'
}
optimizer_args = {
'lr': 1e-2, # was 1e-3
'decay': 1e-6,
'momentum': 0.9,
'nesterov': False
}
compile_args = {
'loss': categorical_crossentropy,
'optimizer': SGD(**optimizer_args),
'metrics': ['accuracy']
}
def get_fit_args(model_save_path):
return {
'batch_size': 32,
'epochs': 30,
'shuffle': False,
'callbacks': [ModelCheckpoint(model_save_path, save_best_only=False)]
}
def get_data_paths(data_dir) -> dict:
model_save_path = '{}/{}.h5'.format(data_dir, model_file)
continued_model = '{}/{}.continued.h5'.format(data_dir, model_file)
root_dir = '{}/train/model_data'.format(definitions.PROJECT_DIR)
scaler_save_path = '{}/scaler_hits.save'.format(data_dir)
all_table_path = '{}/all_train.hdf5'.format(data_dir)
train_table_path = '{}/train.hdf5'.format(data_dir)
hc_counts = '{}/hc_counts.npy'.format(data_dir)
n_counts = '{}/n_counts.npy'.format(data_dir)
val_table_all_path = '{}/all_validation.hdf5'.format(data_dir)
validation_data_path = '{}/validation.hdf5'.format(data_dir)
val_index_map_path = '{}/val_index_map.pkl'.format(data_dir)
all_index_map_path = '{}/all_index_map.pkl'.format(data_dir)
range_data_path = '{}/range_data.json'.format(data_dir)
train_inputs_path = '{}/train_inputs'.format(root_dir)
train_labels_path = '{}/train_labels'.format(root_dir)
return {
'model': model_save_path,
'continued_model': continued_model,
'range_data': range_data_path,
'scaler': scaler_save_path,
'all_table': all_table_path,
'train_table': train_table_path,
'train_inputs': train_inputs_path,
'train_labels': train_labels_path,
'val_table_all': val_table_all_path,
'validation_data': validation_data_path,
'val_index_map': val_index_map_path,
'hc_counts': hc_counts,
'n_counts': n_counts,
'all_index_map': all_index_map_path
}
def get_frame_indexes(objs, n_fft=hyper_params.n_fft,
hop_length=hyper_params.hop_length) -> List:
frames = [timing.mls_to_frames(float(x['time']),
n_fft=n_fft,
hop_length=hop_length)[0]
for x in objs]
return list(map(lambda x: int(x), frames))
def get_input_shape(dim_1):
return dim_1, DIM_X, DIM_Y
def build_inputs(spectrogram, indexes, bin_encoded, label):
'''
Returns np.array of training inputs for the events taking
place at the given indexes
params:
-------
bin_encoded: np.array (spectrogram.shape[0], d) where
d is the number of difficulties
returns:
--------
np.array: (len(indexes), sample_frames, mel_buckets + difficulties)
'''
if bin_encoded.shape[0] != spectrogram.shape[0]:
raise ValueError('''
Length of bin_encoded must match that of spectrogram. Found
the following: bin_encoded.shape = {}, spectrogram.shape = {}.
'''.format(bin_encoded.shape, spectrogram.shape))
dim1 = len(indexes)
labels = np.zeros((dim1, 4))
labels[:, label] = 1
inputs = np.empty(get_input_shape(dim1))
ctx_len = hyper_params.context_length
start_context_rows = -1
for i, index in enumerate(indexes):
context_labels = np.zeros((hyper_params.sample_frames, 1))
c = int((hyper_params.sample_frames - ctx_len) / 2)
context_labels[c:c + ctx_len] = bin_encoded[index - ctx_len: index]
inputs[i, :, :start_context_rows] = spectrogram[
index - ctx_len: index + ctx_len + 1]
inputs[i, :, start_context_rows:] = context_labels
return zip(inputs, labels)
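# Note on the layout produced by build_inputs (an inference from the shapes above, not
# verified in this excerpt): each input is a (DIM_X, DIM_Y) window whose last column
# (start_context_rows == -1) carries hit-context labels taken from bin_encoded for the
# frames preceding the event, while the first DIM_Y - 1 columns hold the mel bins of the
# spectrogram frames centred on the event index (one frame per row). This presumes
# 2 * hyper_params.context_length + 1 == DIM_X and hyper_params.n_mels == DIM_Y - 1.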
def get_slider_points(beatmap_data) -> List[int]:
sliders = beatmap_data['sliders']
timing_points = beatmap_data['timing_points']
slider_multiplier = float(beatmap_data['metadata']['slider_multiplier'])
slider_points = []
for s in sliders:
repeats = int(s['repeat'])
start, end = slider.start_end_frames(
s,
timing_points,
slider_multiplier,
n_fft=hyper_params.n_fft,
hop_length=hyper_params.hop_length
)
slider_points.append(int(start))
slider_points.append(int(end))
duration = end - start
for repeat in range(1, repeats):
time = start + duration * (repeat + 1)
slider_points.append(int(time))
return slider_points
def get_bin_encoded(beatmap_data, song_length, pad=True):
bin_encoded = EmptySliderFrameConverter(
hit_circles=beatmap_data['hit_circles'],
sliders=beatmap_data['sliders'],
spinners=beatmap_data['spinners'],
timing_points=beatmap_data['timing_points'],
breaks=beatmap_data['breaks'],
song_length=song_length,
slider_multiplier=float(beatmap_data['metadata']['slider_multiplier']),
should_filter_breaks=False
).convert()
if pad:
pad_width = (
hyper_params.context_length,
hyper_params.context_length + 1
)
bin_encoded = np.pad(
bin_encoded,
pad_width,
mode='constant',
constant_values=0
)
return bin_encoded
def get_hit_indexes(beatmap_data):
return get_slider_points(beatmap_data) + \
get_frame_indexes(beatmap_data['hit_circles'])
def get_hit_vals(bin_encoded, indexes_hit, indexes_none):
'''
Returns
-------
hit_vals: np.array (n_hits, 2 * n_difficulties)
1-Hot vector:
[
0, - no hit
0, - medium hit only
0, - hard hit only
0 - medium and hard hits
]
'''
encoder = OneHotEncoder(4, sparse=False)
encoded = encoder.fit_transform(bin_encoded)
return np.concatenate(
(encoded[sorted(indexes_hit)], encoded[sorted(indexes_none)]))
def get_label_hits(hit_dict, pad=False):
medium_hits = []
hard_hits = []
both_hits = []
padding = hyper_params.context_length if pad else 0
for key, l in hit_dict.items():
key += padding
if len(l) > 1:
both_hits.append(key)
elif l[0] == 0:
medium_hits.append(key)
else:
hard_hits.append(key)
return medium_hits, hard_hits, both_hits
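# Behaviour sketch for get_label_hits (illustrative only): hit_dict maps a frame index to
# the list of difficulty indices that have an event there, and the keys are split into
# medium-only, hard-only and both.
def _demo_get_label_hits():
    """get_label_hits({10: [0], 20: [1], 30: [0, 1]}) == ([10], [20], [30])"""
    return get_label_hits({10: [0], 20: [1], 30: [0, 1]})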
def get_inputs(beatmap_data: List, spectrogram, limit_hc=None, limit_n=None,
flatten=True):
breaks = reduce(operator.add, [b['breaks'] for b in beatmap_data])
song_len = spectrogram.shape[0]
bin_encoded = np.zeros((song_len, 1))
hit_dict = {}
for i, beatmap in enumerate(beatmap_data):
hits = sorted(get_hit_indexes(beatmap))
bin_encoded[hits] = bin_encoded[hits] + i + 1
for hit in hits:
if hit not in hit_dict:
hit_dict[hit] = [i]
else:
hit_dict[hit].append(i)
medium_indexes, hard_indexes, both_indexes = get_label_hits(hit_dict,
pad=True)
hit_indexes = medium_indexes + hard_indexes + both_indexes
if len(set(hit_indexes)) != len(hit_indexes):
pdb.set_trace()
spectrogram = utils.pad_array(spectrogram)
bin_encoded = utils.pad_array(bin_encoded)
none_indexes = get_none_indexes(hit_indexes, spectrogram, breaks)
none_inputs = build_inputs(spectrogram, none_indexes, bin_encoded, label=0)
medium_inputs = build_inputs(spectrogram, medium_indexes, bin_encoded,
label=1)
hard_inputs = build_inputs(spectrogram, hard_indexes, bin_encoded, label=2)
both_inputs = build_inputs(spectrogram, both_indexes, bin_encoded, label=3)
none_groups = get_group_lists(none_inputs, group_size_limit=limit_n,
label=0)
medium_groups = get_group_lists(medium_inputs, group_size_limit=limit_hc,
label=1)
hard_groups = get_group_lists(hard_inputs, group_size_limit=limit_hc,
label=2)
both_groups = get_group_lists(both_inputs, group_size_limit=limit_hc,
label=3)
if flatten:
all_inputs = flatten_array(
none_groups + medium_groups + hard_groups + both_groups
)
# split into [(input, label), ...] and [(label, x-label, group), ...]
input_labels, coords = zip(*all_inputs)
# split into [input, ...] and [label, ...]
all_inputs, all_labels = zip(*input_labels)
return all_inputs, all_labels, coords
return none_groups, medium_groups, hard_groups, both_groups
def get_counts(*args, limit_n=None):
inputs = get_inputs(*args, limit_n=limit_n, flatten=False)
counts = np.zeros((4, 3, 35))
if inputs is None:
return counts
def to_sum_array(g):
return [[len(l) for l in x] for x in g]
g_n, g_m, g_h, g_b = [to_sum_array(i) for i in inputs]
counts[0] = g_n
counts[1] = g_m
counts[2] = g_h
counts[3] = g_b
return counts
def get_none_indexes(event_indexes, spectrogram, breaks,
limit=None) -> np.ndarray:
intervals = []
for b in breaks:
intervals.append(b['start'])
intervals.append(b['end'])
song_range = np.arange(
hyper_params.context_length,
spectrogram.shape[0] - hyper_params.context_length
)
subarrays = np.split(song_range, intervals)
valid_indexes = np.concatenate(subarrays[::2]) if len(subarrays) > 2 else \
subarrays[0]
none_indexes = np.delete(valid_indexes, event_indexes)
np.random.shuffle(none_indexes)  # api: numpy.random.shuffle
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
import seaborn as sns
import numpy
import pandas
import copy
import os
import plotly.graph_objs as go
from .. import Dataset, MSDataset, NMRDataset
from ..enumerations import VariableType, SampleType, AssayRole
from ..utilities import rsd
from ._plotVariableScatter import plotVariableScatter
def plotRSDs(dataset, featureName='Feature Name', ratio=False, logx=True, xlim=None, withExclusions=True, sortOrder=True, savePath=None, color=None, featName=False, hLines=None, figureFormat='png', dpi=72, figureSize=(11,7)):
"""
plotRSDs(dataset, ratio=False, savePath=None, color=None \*\*kwargs)
Visualise analytical *versus* biological variance.
Plot RSDs calculated in study-reference samples (analytical variance), versus those calculated in study samples (biological variance). RSDs can be visualised either in absolute terms, or as a ratio to analytical variation (*ratio=*\ ``True``).
:py:func:`plotRSDs` requires that the dataset have at least two samples with the :py:attr:`~nPYc.enumerations.AssayRole.PrecisionReference` :term:`assay role`, if present, RSDs calculated on independent sets of :py:attr:`~nPYc.enumerations.AssayRole.PrecisionReference` samples will also be plotted.
:param Dataset dataset: Dataset object to plot, the object must have greater that one 'Study Sample' and 'Study-Reference Sample' defined
:param bool ratio: If ``True`` plot the ratio of analytical variance to biological variance instead of raw values
:param str featureName: featureMetadata column name by which to label features
:param bool logx: If ``True`` plot RSDs on a log10 scaled axis
:param xlim: Tuple of (min, max) RSD values to plot
:type xlim: None or tuple(float, float)
:param hLines: None or list of y positions at which to plot a horizontal line. Features are positioned from 1 to nFeat
:type hLines: None or list
:param savePath: If ``None`` plot interactively, otherwise save the figure to the path specified
:type savePath: None or str
:param color: Allows the default colour pallet to be overridden
:type color: None or seaborn.palettes._ColorPalette
:param bool featName: If ``True`` y-axis label is the feature Name, if ``False`` features are numbered.
"""
rsdTable = _plotRSDsHelper(dataset, featureName=featureName, ratio=ratio, withExclusions=withExclusions, sortOrder=sortOrder)
# If RSD could not be calculated for some features, rsdTable will have fewer rows than dataset.featureMetadata
if hLines is not None:
if dataset.featureMetadata.shape[0] != rsdTable.shape[0]:
temp = [x for x in rsdTable['Feature Name'].values.tolist() if x in dataset.featureMetadata[featureName][dataset.featureMetadata['Passing Selection'] == False].values.tolist()]
hLines = [len(temp)]
# Plot
if xlim:
xLim = xlim
else:
minRSD = numpy.min(rsdTable[rsdTable.columns[1:]].values)
maxRSD = numpy.max(rsdTable[rsdTable.columns[1:]].values)
xLim = (minRSD, maxRSD)
if logx:
xlab = 'RSD (%)'
else:
xlab = 'RSD (%)'
# Add Feature Name if required
if featName:
rsdTable['yName'] = rsdTable['Feature Name']
ylab = 'Feature Name'
else:
ylab = 'Feature Number'
plotVariableScatter(rsdTable, logX=logx, xLim=xLim, xLabel=xlab, yLabel=ylab, sampletypeColor=True, hLines=hLines, vLines=None, savePath=savePath, figureFormat=figureFormat, dpi=dpi, figureSize=figureSize)
def plotRSDsInteractive(dataset, featureName='Feature Name', ratio=False, logx=True):
"""
Plotly-based interactive version of :py:func:`plotRSDs`
Visualise analytical *versus* biological variance.
Plot RSDs calculated in study-reference samples (analytical variance), versus those calculated in study samples (biological variance). RSDs can be visualised either in absolute terms, or as a ratio to analytical variation (*ratio=*\ ``True``).
:py:func:`plotRSDsInteractive` requires that the dataset have at least two samples with the :py:attr:`~nPYc.enumerations.AssayRole.PrecisionReference` :term:`assay role`, if present, RSDs calculated on independent sets of :py:attr:`~nPYc.enumerations.AssayRole.PrecisionReference` samples will also be plotted.
:param Dataset dataset: Dataset object to plot, the object must have greater that one 'Study Sample' and 'Study-Reference Sample' defined
:param str featureName: featureMetadata column name by which to label features
:param bool ratio: If ``True`` plot the ratio of analytical variance to biological variance instead of raw values
:param bool logx: If ``True`` plot RSDs on a log10 scaled axis
"""
rsdTable = _plotRSDsHelper(dataset, featureName=featureName, ratio=ratio)
reversedIndex = numpy.arange(len(rsdTable)-1,-1, -1)
data = []
if SampleType.StudySample in rsdTable.columns:
studySamples = go.Scatter(
x = rsdTable[SampleType.StudySample].values,
y = reversedIndex,
mode = 'markers',
text = rsdTable['Feature Name'],
name = 'Study Sample',
marker = dict(
color = 'rgba(89, 117, 164, .8)',
),
hoverinfo = 'x+text',
)
data.append(studySamples)
if SampleType.ExternalReference in rsdTable.columns:
externalRef = go.Scatter(
x = rsdTable[SampleType.ExternalReference].values,
y = reversedIndex,
mode = 'markers',
text = rsdTable['Feature Name'],
name = 'Long-Term Reference',
marker = dict(
color = 'rgba(181, 93, 96, .8)',
),
hoverinfo = 'x+text',
)
data.append(externalRef)
if SampleType.StudyPool in rsdTable.columns:
studyPool = go.Scatter(
x = rsdTable[SampleType.StudyPool].values,
y = reversedIndex,
mode = 'markers',
text = rsdTable['Feature Name'],
name = 'Study Reference',
marker = dict(
color = 'rgba(95, 158, 110, .8)',
),
hoverinfo = 'x+text',
)
data.append(studyPool)
if logx:
xaxis = dict(
type='log',
title='RSD (%)',
autorange=True
)
else:
xaxis = dict(
title='RSD (%)'
)
layout = go.Layout(
title='Feature RSDs',
legend=dict(
orientation="h"
),
hovermode = "closest",
yaxis=dict(
title='Feature Number'
),
xaxis=xaxis
)
figure = go.Figure(data=data, layout=layout)
return figure
def _plotRSDsHelper(dataset, featureName='Feature Name', ratio=False, withExclusions=False, sortOrder=True):
if not dataset.VariableType == VariableType.Discrete:
raise ValueError('Only datasets with discretely sampled variables are supported.')
if sum(dataset.sampleMetadata.loc[dataset.sampleMask, 'SampleType'].values == SampleType.StudySample) <= 2:
raise ValueError('More than two Study Samples must be defined to calculate biological RSDs.')
## Calculate RSD for every SampleType with enough PrecisionReference samples.
rsdVal = dict()
precRefMask = dataset.sampleMetadata.loc[:, 'AssayRole'].values == AssayRole.PrecisionReference
precRefMask = numpy.logical_and(precRefMask, dataset.sampleMask)
sTypes = list(set(dataset.sampleMetadata.loc[precRefMask, 'SampleType'].values))
if withExclusions:
rsdVal['Feature Name'] = dataset.featureMetadata.loc[dataset.featureMask, featureName].values
rsdVal[SampleType.StudyPool] = dataset.rsdSP[dataset.featureMask]
ssMask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudySample) & dataset.sampleMask
rsdList = rsd(dataset.intensityData[ssMask, :])
rsdVal[SampleType.StudySample] = rsdList[dataset.featureMask]
else:
rsdVal['Feature Name'] = dataset.featureMetadata.loc[:, featureName].values
rsdVal[SampleType.StudyPool] = dataset.rsdSP
ssMask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudySample) & dataset.sampleMask
rsdList = rsd(dataset.intensityData[ssMask, :])
rsdVal[SampleType.StudySample] = rsdList
# Only keep features with finite values for SP and SS
finiteMask = (rsdVal[SampleType.StudyPool] < numpy.finfo(numpy.float64).max)
finiteMask = finiteMask & (rsdVal[SampleType.StudySample] < numpy.finfo(numpy.float64).max)
for sType in sTypes:
if not sTypes == SampleType.StudyPool:
sTypeMask = dataset.sampleMetadata.loc[:, 'SampleType'].values == sType
# precRefMask limits to Precision Reference and dataset.sampleMask
sTypeMask = numpy.logical_and(sTypeMask, precRefMask)
# minimum 3 points needed
if sum(sTypeMask) >= 3:
rsdList = rsd(dataset.intensityData[sTypeMask, :])
if withExclusions:
rsdVal[sType] = rsdList[dataset.featureMask]
else:
rsdVal[sType] = rsdList
finiteMask = finiteMask & (rsdVal[sType] < numpy.finfo(numpy.float64).max)
## apply finiteMask
for sType in rsdVal.keys():
rsdVal[sType] = rsdVal[sType][finiteMask]
if ratio:
rsdSP = copy.deepcopy(rsdVal[SampleType.StudyPool])
for sType in sTypes:
rsdVal[sType] = numpy.divide(rsdVal[sType], rsdSP)  # api: numpy.divide
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import math
import numpy as np
class Box(object):
def __init__(
self,
width=1,
height=1,
length=1,
centerX=0,
centerY=0,
centerZ=0,
yaw=0.0,
pitch=0.0,
roll=0.0,
translationX=0,
translationY=0,
translationZ=0,
):
# In webots length is in z-axis, width is in x-axis and height is in y-axis
# Center is the rotation center for the box
# -> in webots, this should be the rear axle location relative to the center of the box
# -> center is the vector from the true center of the box to the rotation center of the box
# In webots yaw is CC around the y-axis!
# In webots pitch is CC around the z-axis!
# In webots roll is CC around the x-axis!
# NOTE: this geometry class applies a translation to get the center of rotation,
# rotates the box and then applies a global translation to move the rectangle in a global coordinate system
self.dimensions = np.array([width, height, length])
self.center = np.array([centerX, centerY, centerZ])
self.translation = np.array([translationX, translationY, translationZ])
self.yaw = yaw
self.pitch = pitch
self.roll = roll
self.unrotatedegocorners = self._getunrotatedegocorners()
self.rotation = self.getyawrollpitchrotation(self.yaw, self.pitch, self.roll)
# The transpose is the inverse rotation matrix
        self.reverserotation = np.transpose(self.rotation)
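# --- Hedged sketch (not part of the original file, which is cut off above) ---
# getyawrollpitchrotation() is called in __init__ but its body is missing from
# this excerpt. The helper below only illustrates one plausible reading of the
# stated convention (yaw CCW about y, pitch CCW about z, roll CCW about x);
# the composition order Ry @ Rz @ Rx is an assumption, not the author's
# confirmed implementation. It reuses the file's `math` and `np` imports.
def _yaw_pitch_roll_rotation_sketch(yaw, pitch, roll):
    cy, sy = math.cos(yaw), math.sin(yaw)
    cp, sp = math.cos(pitch), math.sin(pitch)
    cr, sr = math.cos(roll), math.sin(roll)
    Ry = np.array([[cy, 0.0, sy], [0.0, 1.0, 0.0], [-sy, 0.0, cy]])  # rotation about y (yaw)
    Rz = np.array([[cp, -sp, 0.0], [sp, cp, 0.0], [0.0, 0.0, 1.0]])  # rotation about z (pitch)
    Rx = np.array([[1.0, 0.0, 0.0], [0.0, cr, -sr], [0.0, sr, cr]])  # rotation about x (roll)
    return Ry @ Rz @ Rx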
# -*- coding: utf-8 -*-
import os
import mxnet as mx
import numpy as np
import cv2
import shutil
def iou(x, ys):
"""
Calculate intersection-over-union overlap
Params:
----------
x : numpy.array
single box [xmin, ymin ,xmax, ymax]
ys : numpy.array
multiple box [[xmin, ymin, xmax, ymax], [...], ]
Returns:
-----------
numpy.array
[iou1, iou2, ...], size == ys.shape[0]
"""
ixmin = np.maximum(ys[:, 0], x[0])
iymin = np.maximum(ys[:, 1], x[1])
ixmax = np.minimum(ys[:, 2], x[2])
iymax = np.minimum(ys[:, 3], x[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = (x[2] - x[0]) * (x[3] - x[1]) + (ys[:, 2] - ys[:, 0]) * \
(ys[:, 3] - ys[:, 1]) - inters
ious = inters / uni
    ious[uni < 1e-12] = 0  # guard against degenerate (zero-area) boxes
return ious
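# --- Illustrative usage of iou() (added sketch; the boxes below are made-up
# values, not data from the original test set). The first candidate overlaps
# the query box with IoU = 1/7 ~= 0.143, the second is disjoint and scores 0.
def _iou_usage_example():
    query = np.array([0.0, 0.0, 2.0, 2.0])           # [xmin, ymin, xmax, ymax]
    candidates = np.array([[1.0, 1.0, 3.0, 3.0],
                           [3.0, 3.0, 4.0, 4.0]])
    return iou(query, candidates)                    # -> array([0.1428..., 0.0])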
def plot_rectangle(predict_box, label_box, img_name, img_path, error_img_path, error_img_head_path = None):
if predict_box.ndim == 1:
predict_box = predict_box.reshape(1, predict_box.size)
filename = os.path.join(img_path, img_name)
img = cv2.imread(filename)
class_list = ['LPRrect']
font = cv2.FONT_HERSHEY_COMPLEX
Thickness_box = 1
Thickness_text = 1
height = img.shape[0]
width = img.shape[1]
    img_head = img.copy()  # kept for saving the image region inside the predicted box later
img_name = img_name.split('/')[-1]
    # Draw the predicted boxes in red, annotated with confidence, IoU and class
ious = iou(label_box[1:5], predict_box[:,2:])
for j in range(predict_box.shape[0]):
xmin = int(predict_box[j][2]*width)
ymin = int(predict_box[j][3]*height)
xmax = int(predict_box[j][4]*width)
ymax = int(predict_box[j][5]*height)
cv2.rectangle(img, (xmin,ymin), (xmax,ymax), (0,0,255), Thickness_box)
text = str((class_list[int(predict_box[j][0])], round(predict_box[j][1],4), round(ious[j],4)))
cv2.putText(img, text, (xmin,ymax+50), font, 1, (0,0,255), Thickness_text)
    # Draw the ground-truth box in green, annotated with its class
cv2.rectangle(img, (int(label_box[1]*width),int(label_box[2]*height)),
(int(label_box[3]*width),int(label_box[4]*height)), (0,255,0), Thickness_box)
cv2.putText(img, class_list[int(label_box[0])], (int(label_box[1]*width),int(label_box[2]*height)-10),
font, 1, (0,255,0), Thickness_text)
    # Crop the predicted box region and save it to the designated directory
    if predict_box.shape[0] == 1 and error_img_head_path is not None:
img_head = img_head[ymin:ymax, xmin:xmax]
cv2.imwrite(error_img_head_path+img_name, img_head)
cv2.imwrite(error_img_path+img_name, img)
def find_wrong_detection(labels, preds, list_path, img_path, ovp_thresh = 0.5):
"""
compare the labels and preds to find false negative and false positive.
Params:
----------
labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional
2-d array of ground-truths, n objects(id-xmin-ymin-xmax-ymax-[difficult])
labels.shape : test sample number * 1 * 6
labels.type : <class 'mxnet.ndarray.ndarray.NDArray'>
preds: mx.nd.array (m * 6)
2-d array of detections, m objects(id-score-xmin-ymin-xmax-ymax)\
preds.shape : test sample number * anchor number * 6
preds.type : <class 'mxnet.ndarray.ndarray.NDArray'>
    NOTE: this function only handles the case where each image contains exactly one ground-truth box.
"""
flags = [-1]*labels.shape[0]
    # -1: unset (background; both ground truth and prediction are background)
    # 0 : correct detection
    # 1 : iou < ovp_thresh (poor localisation)
    # 2 : fewer predicted boxes than ground-truth boxes (missed detection), or counts match but classes disagree
    # stores error images whose predicted class disagrees with the ground truth
wrong_class_img_path = os.path.join(img_path, 'worng_class/')
    # stores error images whose IoU is too low (plus the cropped predicted regions)
low_iou_img_path = os.path.join(img_path, 'low_iou/')
low_iou_img_head_path = os.path.join(img_path, 'low_iou_head/')
if os.path.exists(wrong_class_img_path):
shutil.rmtree(wrong_class_img_path)
os.mkdir(wrong_class_img_path)
if os.path.exists(low_iou_img_path):
shutil.rmtree(low_iou_img_path)
os.mkdir(low_iou_img_path)
if os.path.exists(low_iou_img_head_path):
shutil.rmtree(low_iou_img_head_path)
os.mkdir(low_iou_img_head_path)
fp = open(list_path)
listlines = fp.readlines()
img_name_list = []
for lines in listlines:
imgname = lines.split('\t')[-1]
        # strip carriage-return, newline and tab characters
imgname = imgname.replace('\r','').replace('\n','').replace('\t','')
img_name_list.append(imgname)
    # stores the maximum IoU among each image's predicted boxes
iou_list = []
for i in range(labels.shape[0]):
# get as numpy arrays
label = labels[i].asnumpy()
pred = preds[i].asnumpy()
img_name = img_name_list[i]
        # remove predicted boxes classified as background or as non-motor-vehicle classes
background_indices = np.where(pred[:, 0].astype(int) < 0)[0]
        pred = np.delete(pred, background_indices, axis=0)
# Machine Learning Online Class - Exercise 2: Logistic Regression
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the logistic
# regression exercise. You will need to complete the following functions
# in this exericse:
#
# sigmoid.py
# costFunction.py
# predict.py
# costFunctionReg.py
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
from plotData import *
import costFunction as cf
import plotDecisionBoundary as pdb
import predict as predict
from sigmoid import *
plt.ion()
# Load data
# The first two columns contain the exam scores and the third column contains the label.
data = np.loadtxt('ex2data1.txt', delimiter=',')
print('plot_decision_boundary data[0, 0:1] = \n{}'.format(data[0, 0:1]))
print('plot_decision_boundary data[0, 0:2] = \n{}'.format(data[0, 0:2]))
print('plot_decision_boundary data[0, 0:3] = \n{}'.format(data[0, 0:3]))
print('plot_decision_boundary data[0, 1:1] = \n{}'.format(data[0, 1:1]))
print('plot_decision_boundary data[0, 1:2] = \n{}'.format(data[0, 1:2]))
print('plot_decision_boundary data[0, 1:3] = \n{}'.format(data[0, 1:3]))
print('plot_decision_boundary data[0, 2:1] = \n{}'.format(data[0, 2:1]))
print('plot_decision_boundary data[0, 2:2] = \n{}'.format(data[0, 2:2]))
print('plot_decision_boundary data[0, 2:3] = \n{}'.format(data[0, 2:3]))
X = data[:, 0:2]
y = data[:, 2]
# ===================== Part 1: Plotting =====================
# We start the exercise by first plotting the data to understand the
# the problem we are working with.
print('Plotting Data with + indicating (y = 1) examples and o indicating (y = 0) examples.')
plot_data(X, y)
plt.axis([30, 100, 30, 100])
# Specified in plot order.
plt.legend(['Admitted', 'Not admitted'], loc=1)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 2: Compute Cost and Gradient =====================
# In this part of the exercise, you will implement the cost and gradient
# for logistic regression. You need to complete the code in
# costFunction.py
# Setup the data array appropriately, and add ones for the intercept term
(m, n) = X.shape
# Add intercept term
X = np.c_[np.ones(m), X]
# Initialize fitting parameters
initial_theta = np.zeros(n + 1)  # initialize the weight vector theta
# Compute and display initial cost and gradient
cost, grad = cf.cost_function(initial_theta, X, y)
np.set_printoptions(formatter={'float': '{: 0.4f}\n'.format})
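# --- Hedged sketch of the cost/gradient computation (costFunction.py itself is
# not shown in this excerpt). This is the standard unregularized logistic-
# regression cost J(theta) = (1/m) * sum(-y*log(h) - (1-y)*log(1-h)) and its
# gradient (1/m) * X.T @ (h - y); the real cf.cost_function may differ in detail.
def _cost_function_sketch(theta, X, y):
    m = y.size
    h = sigmoid(X.dot(theta))                        # hypothesis h_theta(x)
    cost = (-y.dot(np.log(h)) - (1 - y).dot(np.log(1 - h))) / m
    grad = X.T.dot(h - y) / m                        # one partial derivative per parameter
    return cost, grad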
import numpy as np
from sklearn import cluster
def gaussian_sp(delta_t, miu, sigma=65):
x, u, sig = delta_t, miu, sigma
    p = np.exp(-(x-u)**2 / (2*sig**2))
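    return p  # assumption: the original file is truncated right after computing p
# Illustrative values (made-up inputs): p peaks at 1.0 when delta_t == miu and
# decays with width sigma; e.g. with the default sigma of 65,
# gaussian_sp(165, 100) gives exp(-0.5) ~= 0.607.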