prompt | completion | api
---|---|---|
import cv2
from PIL import Image
import numpy as np
import pydensecrf.densecrf as dcrf
import multiprocessing
import os
from os.path import exists
palette = [0,0,0, 128,0,0, 0,128,0, 128,128,0, 0,0,128, 128,0,128, 0,128,128, 128,128,128,
64,0,0, 192,0,0, 64,128,0, 192,128,0, 64,0,128, 192,0,128, 64,128,128, 192,128,128,
0,64,0, 128,64,0, 0,192,0, 128,192,0, 0,64,128]
cats = ['background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tv']
# prepare data
data_path = 'data/VOCdevkit/VOC2012/'
train_lst_path = data_path + 'ImageSets/Segmentation/train_cls.txt'
im_path = data_path + 'JPEGImages/'
sal_path = data_path + 'saliency_aug/'
att_path = data_path + 'feat_1/'
save_path = data_path + 'proxy_label_1/'
if not exists(save_path):
os.makedirs(save_path)
with open(train_lst_path) as f:
lines = f.readlines()
# generate proxy labels
def gen_gt(index):
line = lines[index]
line = line[:-1]
fields = line.split()
name = fields[0]
im_name = im_path + name + '.jpg'
bg_name = sal_path + name + '.png'
img = cv2.imread(im_name)
sal = cv2.imread(bg_name, 0)
height, width = sal.shape
gt = np.zeros((21, height, width), dtype=np.float32)
sal = np.array(sal, dtype=np.float32)
conflict = 0.9
bg_thr = 32
att_thr = 0.8
gt[0] = (1 - (sal / 255))
init_gt = np.zeros((height, width), dtype=float)
sal_att = sal.copy()
for i in range(len(fields) - 1):
k = i + 1
cls = int(fields[k])
att_name = att_path + name + '_' + str(cls) + '.png'
if not exists(att_name):
continue
att = cv2.imread(att_name, 0)
att = (att - np.min(att)) / (np.max(att) - np.min(att) + 1e-8)
gt[cls+1] = att.copy()
sal_att = np.maximum(sal_att, (att > att_thr) *255)
bg = | np.array(gt > conflict, dtype=np.uint8) | numpy.array |
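The snippet above fuses a saliency map with per-class attention maps into a 21-channel proxy label, and imports pydensecrf for the refinement step that is cut off at this row boundary. Below is a minimal, hedged sketch of how such a dense CRF refinement is typically wired up with the standard pydensecrf API (`DenseCRF2D`, `unary_from_softmax`); the `crf_refine` helper name and the pairwise kernel weights are illustrative assumptions, not values taken from the row.

```python
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax

def crf_refine(img, prob, n_iters=10):
    """Refine per-class probabilities `prob` (C, H, W) against a color uint8 image."""
    c, h, w = prob.shape
    d = dcrf.DenseCRF2D(w, h, c)
    # Unary potentials from the (clipped) class probabilities.
    d.setUnaryEnergy(unary_from_softmax(np.clip(prob, 1e-5, 1.0)))
    # Smoothness and appearance kernels; the weights here are illustrative.
    d.addPairwiseGaussian(sxy=3, compat=3)
    d.addPairwiseBilateral(sxy=50, srgb=5, rgbim=np.ascontiguousarray(img), compat=10)
    q = d.inference(n_iters)
    # Hard proxy label: argmax over the refined class marginals.
    return np.argmax(np.array(q).reshape((c, h, w)), axis=0).astype(np.uint8)
```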
import numba
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
from random_state import ensure_random_state
njit_cached = numba.njit(cache=True)
@njit_cached
def _score_split(y, partition):
"""Return the mean squared error of a potential split partition.
Parameters
----------
y : (num_samples,) ndarray
The vector of targets in the current node.
partition : tuple
A 2-tuple of boolean masks to index left and right samples in
``y``.
"""
mask_left, mask_right = partition
y_left = y[mask_left]
y_right = y[mask_right]
return (np.sum((y_left - y_left.mean()) ** 2) +
np.sum((y_right - y_right.mean()) ** 2))
@njit_cached
def _find_best_split(x, y):
"""Find the best split for a vector of samples.
Determine the threshold in `x` which optimizes the split score by
minimizing the mean squared error of the target.
Parameters
----------
x : (num_samples,) ndarray
The vector of observations of a particular feature.
y : (num_samples,) ndarray
The vector of targets.
Returns
-------
split_configuration : dict
A dictionary with the best `score`, `threshold` and `partition`.
"""
best_score = np.inf
best_threshold = None
best_partition = None
for threshold in x:
# Obtain binary masks for all samples whose feature values are
# below (left) or above (right) the split threshold.
mask_left = x < threshold
mask_right = x >= threshold
# If we can't split the samples based on `threshold', move on.
if not mask_left.any() or not mask_right.any():
continue
# Score the candidate split.
partition = (mask_left, mask_right)
score = _score_split(y, partition)
if score < best_score:
best_score = score
best_threshold = threshold
best_partition = partition
return best_score, best_threshold, best_partition
class Tree:
"""The fundamental data structure representing a binary decision tree.
Parameters
----------
max_depth : int or None
The maximum allowed tree depth. In general, this requires pruning the
tree to select the best subtree configuration.
min_samples_split : int
The minimum number of samples required to split an internal node.
max_features : int or None
The size of the randomly selected subset of features to consider when
splitting an internal node.
random_state : numpy.random.Generator or int or None
The random state of the estimator to allow reproducible tree
construction when `max_features` is not None.
Attributes
----------
left : Tree or None
The left node of the tree or None if the current node is a leaf.
right : Tree or None
The right node of the tree or None if the current node is a leaf.
feature_index : int
The column index of the feature to split on in the current node.
threshold : float or None
The feature value to split by or None if the node is a leaf.
prediction : float or None
The prediction value if the node is a leaf or None.
"""
def __init__(self, max_depth=None, min_samples_split=2, max_features=None,
random_state=None):
self._max_depth = max_depth
self._min_samples_split = min_samples_split
self._max_features = max_features
self._random_state = random_state
if self._max_depth is None:
self._max_depth = np.inf
if self._max_features is not None:
assert self._random_state is not None, "No random state provided"
self.left = None
self.right = None
self.feature_index = None
self.threshold = None
self.prediction = None
def construct_tree(self, X, y, depth=0):
"""Construct the binary decision tree via recursive splitting.
Parameters
----------
X : (num_samples, num_features) ndarray
The matrix of observations.
y : (num_samples,) ndarray
The vector of targets corresponding to the observations `X`.
"""
num_samples, num_features = X.shape
# Too few samples to split, so turn the node into a leaf.
if num_samples < self._min_samples_split or depth >= self._max_depth:
self.prediction = y.mean()
return
random_state = ensure_random_state(self._random_state)
if self._max_features is not None:
feature_indices = random_state.integers(
num_features, size=min(self._max_features, num_features))
else:
feature_indices = | np.arange(num_features) | numpy.arange |
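The split helpers above score every candidate threshold by the summed squared error of the two resulting child nodes. A small hedged usage sketch, assuming `_find_best_split` from the module above is in scope; the toy arrays are illustrative:

```python
import numpy as np

# Two well-separated clusters; the best threshold should fall between them,
# since splitting there minimizes the summed within-node squared error.
x = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 12.0])
y = np.array([1.0, 1.1, 0.9, 5.0, 5.2, 4.8])

score, threshold, partition = _find_best_split(x, y)
print(score, threshold)  # expect threshold == 10.0 on this toy data
```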
import os
from pathlib import Path
from unittest import TestCase
import numpy as np
import pytest
import s3fs
import zarr
from easepy import EaseGrid
path = Path(__file__)
BASE_FILE_LOCATION = "s3://public-test-data/easepy/"
class TestEasepy(TestCase):
def setUp(self):
pass
@pytest.mark.unit
def test_ease_north_hemi_geodetic2ease_array(self):
ease = EaseGrid(12000, "NorthHemi")
lats = np.array([75, 85, 89.99])
lons = | np.array([-175, 7, 155]) | numpy.array |
from soxs.instrument import make_background, AuxiliaryResponseFile, \
instrument_simulator, make_background_file, simulate_spectrum, \
RedistributionMatrixFile
from soxs.background.foreground import hm_astro_bkgnd
from soxs.background.instrument import acisi_particle_bkgnd
from soxs.background.spectra import ConvolvedBackgroundSpectrum
from numpy.random import RandomState
from numpy.testing import assert_allclose
import astropy.io.fits as pyfits
import tempfile
import os
import shutil
import numpy as np
prng = RandomState(24)
def test_uniform_bkgnd_scale():
hdxi_arf = AuxiliaryResponseFile("xrs_hdxi_3x10.arf")
events, event_params = make_background((50, "ks"), "hdxi", [30., 45.],
foreground=True, instr_bkgnd=True,
ptsrc_bkgnd=False, prng=prng)
ncts = np.logical_and(events["energy"] >= 0.7, events["energy"] <= 2.0).sum()
t_exp = event_params["exposure_time"]
fov = (event_params["fov"]*60.0)**2
S = ncts/t_exp/fov
dS = np.sqrt(ncts)/t_exp/fov
foreground = ConvolvedBackgroundSpectrum(hm_astro_bkgnd, hdxi_arf)
f_sum = foreground.get_flux_in_band(0.7, 2.0)[0]
i_sum = acisi_particle_bkgnd.get_flux_in_band(0.7, 2.0)[0]
b_sum = (f_sum+i_sum).to("ph/(arcsec**2*s)").value
assert np.abs(S-b_sum) < 1.645*dS
def test_simulate_bkgnd_spectrum():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
prng = RandomState(29)
hdxi_arf = AuxiliaryResponseFile("xrs_hdxi_3x10.arf")
hdxi_rmf = RedistributionMatrixFile("xrs_hdxi.rmf")
exp_time = 50000.0
fov = 3600.0
simulate_spectrum(None, "hdxi", exp_time, "test_bkgnd.pha",
instr_bkgnd=True, foreground=True, prng=prng,
overwrite=True, bkgnd_area=(fov, "arcsec**2"))
ch_min = hdxi_rmf.e_to_ch(0.7)-hdxi_rmf.cmin
ch_max = hdxi_rmf.e_to_ch(2.0)-hdxi_rmf.cmin
f = pyfits.open("test_bkgnd.pha")
ncts = f["SPECTRUM"].data["COUNTS"][ch_min:ch_max].sum()
f.close()
S = ncts/exp_time/fov
dS = np.sqrt(ncts)/exp_time/fov
foreground = ConvolvedBackgroundSpectrum(hm_astro_bkgnd, hdxi_arf)
f_sum = foreground.get_flux_in_band(0.7, 2.0)[0]
i_sum = acisi_particle_bkgnd.get_flux_in_band(0.7, 2.0)[0]
b_sum = (f_sum+i_sum).to("ph/(arcsec**2*s)").value
assert np.abs(S-b_sum) < 1.645*dS
os.chdir(curdir)
shutil.rmtree(tmpdir)
def test_add_background():
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
os.chdir(tmpdir)
prng1 = RandomState(29)
prng2 = RandomState(29)
ra0 = 30.0
dec0 = 45.0
ra1 = 22.0
dec1 = 22.0
exp_time = 50000.0
ra = np.array([])
dec = np.array([])
e = | np.array([]) | numpy.array |
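Both tests above reduce the simulated events to a surface brightness and require agreement with the model within 1.645 standard errors, i.e. the one-sided 95% (two-sided 90%) point of a normal distribution applied to Poisson counting noise. A hedged restatement of that check in the code's own symbols:

```latex
S = \frac{N_{0.7\text{-}2.0\,\mathrm{keV}}}{t_{\mathrm{exp}}\,\Omega_{\mathrm{fov}}},
\qquad
\sigma_S = \frac{\sqrt{N_{0.7\text{-}2.0\,\mathrm{keV}}}}{t_{\mathrm{exp}}\,\Omega_{\mathrm{fov}}},
\qquad
\left|S - S_{\mathrm{model}}\right| < 1.645\,\sigma_S
```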
from .common_setup import *
import numpy as np
import tensorflow as tf
from astropy import time as at
from bayes_filter import float_type
from bayes_filter.coord_transforms import itrs_to_enu_6D, tf_coord_transform, itrs_to_enu_with_references, ITRSToENUWithReferences
from bayes_filter.feeds import IndexFeed, TimeFeed, CoordinateFeed, init_feed
from bayes_filter.misc import make_coord_array
def test_itrs_to_enu_6D(tf_session, time_feed, lofar_array):
# python test
times = | np.arange(2) | numpy.arange |
import numpy as np
class one_fsq_noise(object):
def __init__(self):
self.buffer = | np.array([0.]) | numpy.array |
#!/usr/bin/env python
# coding: utf-8
"""
This script loads a template file and fills in IDs in columns where they are missing
author: <NAME> for Knocean Inc., 22 September 2020
"""
import pandas as pd
import numpy as np
from pathlib import Path
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument(
"-t", "--template", dest="template_file", help="Template file", metavar="FILE", required=True
)
parser.add_argument(
"-m",
"--metadata-table",
dest="metadata_table_path",
help="Path to the IHCC metadata table",
metavar="FILE",
)
parser.add_argument(
    "-l",
    "--length-id",
    dest="length_of_id",
    type=int,
    help="How many characters should your id be at most?",
    metavar="N",
    default=7,
)
args = parser.parse_args()
# Get the data dictionary id. If a metadata file is supplied,
# get it from there; otherwise use the template file stem in uppercase
if args.metadata_table_path:
df = pd.read_csv(args.metadata_table_path, header=None, sep="\t")
dd_id = df[df.iloc[:, 0] == "Cohort ID"].iloc[:, 1].iloc[0]
else:
dd_id = Path(args.template_file).stem
dd_id = dd_id.upper()
print("Generating IDs for data dictionary: %s" % dd_id)
NUM_PADDED_ZERO = args.length_of_id
MAX_ID = int("9" * NUM_PADDED_ZERO)
PREFIX = "%s:" % dd_id
COL_TERM_ID = "Term ID"
COL_LABEL = "Label"
df = pd.read_csv(args.template_file, sep="\t", dtype=str)
len_pre = len(df)
highest_current_id = 0
if COL_TERM_ID in df.columns:
df_nn = df[df[COL_TERM_ID].notnull()]
ids = df_nn[df_nn[COL_TERM_ID].str.startswith(PREFIX)][COL_TERM_ID].tolist()
ids = [i.replace(PREFIX, "") for i in ids]
ids_int = [int(i) for i in ids if i.isdigit()]
if ids_int:
highest_current_id = max(ids_int)
else:
df[COL_TERM_ID] = ""
for index, row in df.iterrows():
value = row[COL_TERM_ID]
if row[COL_LABEL] or (value.dtype == float and not | np.isnan(value) | numpy.isnan |
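The row is cut off here, but the constants defined above (PREFIX, NUM_PADDED_ZERO, MAX_ID) imply that new Term IDs are the prefix followed by a zero-padded integer counted up from highest_current_id. A hedged sketch of just that formatting step; the `next_term_id` helper and the `GECKO:` prefix are hypothetical, for illustration only:

```python
def next_term_id(current_highest, prefix="GECKO:", width=7, max_id=9999999):
    # Format the next identifier as PREFIX + zero-padded integer, mirroring
    # the PREFIX / NUM_PADDED_ZERO / MAX_ID constants defined in the script.
    next_id = current_highest + 1
    if next_id > max_id:
        raise ValueError("Ran out of identifiers for this prefix")
    return f"{prefix}{str(next_id).zfill(width)}"

print(next_term_id(41))  # -> 'GECKO:0000042'
```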
import random
import cv2
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from aug import Operation, perform_randomly, utils
@perform_randomly
class PerspectiveDistortion(Operation):
def __init__(self, max_warp=0.2, input_mtx=None, return_mtx=False):
self._max_warp = max_warp
self._mtx = input_mtx
self._return_mtx = return_mtx
def get_mtx(self, im_height, im_width):
b = int(min(im_height, im_width) * self._max_warp)
r = random.randint
pts2 = np.float32([[0, 0], [im_width - 1, 0], [0, im_height - 1],
[im_width - 1, im_height - 1]])
pts1 = np.float32([[r(0, b), r(0, b)], [im_width - 1 - r(0, b),
r(0, b)], [r(0, b), im_height - 1 - r(0, b)],
[im_width - 1 - r(0, b), im_height - 1 - r(0, b)]])
return cv2.getPerspectiveTransform(pts1, pts2)
def transform_perspective_and_get_matrix(self, img):
"""
Find four random points within the image and apply a perspective transformation.
Args:
img: input image
The warp magnitude (max_warp) and optional fixed matrix (input_mtx) are taken
from the instance attributes set in __init__.
"""
im_height, im_width = img.shape[:2]
if self._mtx is None:
self._mtx = self.get_mtx(im_height, im_width)
return cv2.warpPerspective(img, self._mtx, (im_width, im_height)), self._mtx
def apply_on_image(self, img):
image, mtx = self.transform_perspective_and_get_matrix(img)
if self._return_mtx:
return image, mtx
return image
def apply_on_annotations(self, annotations):
"""Apply transformation on set of points. """
if self._mtx is not None and annotations is not None:
annotations = annotations.astype(np.float32)
annotations = cv2.perspectiveTransform(annotations, self._mtx)
return annotations
def apply_on_masks(self, masks):
return np.array([self.apply_on_image(mask) for mask in list(masks)])
@perform_randomly
class ElasticDistortion(Operation):
"""
Based on: https://github.com/albu/albumentations/blob/master/albumentations/augmentations/functional.py
"""
def __init__(self,
alpha=100.,
sigma=10.,
alpha_affine_range=10.,
interpolation=cv2.INTER_LINEAR,
border_mode=cv2.BORDER_REFLECT_101):
self._alpha = alpha
self._sigma = sigma
self._alpha_affine = alpha_affine_range
self._interpolation = interpolation
self._border_mode = border_mode
self._alpha = float(self._alpha)
self._sigma = float(self._sigma)
self._alpha_affine = float(self._alpha_affine)
self._mapx = None
self._mapy = None
self._matrix = None
def apply_on_image(self, image):
h, w = image.shape[:2]
if self._mapx is not None and self._mapy is not None and self._matrix is not None:
image = cv2.warpAffine(image,
self._matrix, (w, h),
flags=self._interpolation,
borderMode=self._border_mode)
return cv2.remap(image, self._mapx, self._mapy, self._interpolation, borderMode=self._border_mode)
# If method is called first time:
center_square = np.float32((h, w)) // 2 # Random affine
square_size = min((h, w)) // 3
pts1 = np.float32([
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size
])
pts2 = pts1 + np.random.uniform(
-self._alpha_affine, self._alpha_affine, size=pts1.shape).astype(np.float32)
self._matrix = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(image,
self._matrix, (w, h),
flags=self._interpolation,
borderMode=self._border_mode)
dx = gaussian_filter((np.random.rand(h, w) * 2 - 1), self._sigma)
dx = np.float32(dx * self._alpha)
dy = gaussian_filter((np.random.rand(h, w) * 2 - 1), self._sigma)
dy = np.float32(dy * self._alpha)
x, y = np.meshgrid(np.arange(w), np.arange(h))
self._mapx = np.float32(x + dx)
self._mapy = np.float32(y + dy)
return cv2.remap(image, self._mapx, self._mapy, self._interpolation, borderMode=self._border_mode)
def apply_on_masks(self, masks):
return np.array([self.apply_on_image(mask) for mask in list(masks)])
@perform_randomly
class GridDistortion(Operation):
"""
Based on: https://github.com/albu/albumentations/blob/master/albumentations/augmentations/functional.py
"""
def __init__(self,
num_steps=(10, 10),
distort_limit=(.1, 2.),
interpolation=cv2.INTER_LINEAR,
maintain_size=True):
self._num_steps = num_steps
self._xsteps = [
1 + random.uniform(distort_limit[0], distort_limit[1]) for _ in range(num_steps[0] + 1)
]
self._ysteps = [
1 + random.uniform(distort_limit[0], distort_limit[1]) for _ in range(num_steps[1] + 1)
]
self._interpolation = interpolation
self._maintain_size = maintain_size
def apply_on_image(self, img):
h, w = img.shape[:2]
x_step = w // self._num_steps[0]
xx = np.zeros(w, np.float32)
prev = 0
for idx, x in enumerate(range(0, w, x_step)):
start = x
end = x + x_step
if end > w:
end = w
cur = w
else:
cur = prev + x_step * self._xsteps[idx]
xx[start:end] = np.linspace(prev, cur, end - start)
prev = cur
y_step = h // self._num_steps[1]
yy = np.zeros(h, np.float32)
prev = 0
for idx, y in enumerate(range(0, h, y_step)):
start = y
end = y + y_step
if end > h:
end = h
cur = h
else:
cur = prev + y_step * self._ysteps[idx]
yy[start:end] = np.linspace(prev, cur, end - start)
prev = cur
map_x, map_y = np.meshgrid(xx, yy)
map_x = map_x.astype(np.float32)
map_y = map_y.astype(np.float32)
img = cv2.remap(img,
map_x,
map_y,
interpolation=self._interpolation,
borderMode=cv2.BORDER_CONSTANT)
img = 255 - utils.fit_borders(255 - img)
if self._maintain_size:
img = cv2.resize(img, (w, h))
return img
@perform_randomly
class OpticalDistortion(Operation):
"""
Based on: https://github.com/albu/albumentations/blob/master/albumentations/augmentations/functional.py
"""
def __init__(self,
distort_limit_x=(-.003, .003),
distort_limit_y=(-.003, .003),
shift_limit=(-.1, .1),
interpolation=cv2.INTER_LINEAR,
border_color=(0, 0, 0)):
self._shift_limit = shift_limit
self._interpolation = interpolation
self._border_color = border_color
self._k_x = random.uniform(*distort_limit_x)
self._k_y = random.uniform(*distort_limit_y)
self._dx = random.uniform(*shift_limit)
self._dy = random.uniform(*shift_limit)
def apply_on_image(self, img):
h, w = img.shape[:2]
dx = round(w * self._dx)
dy = round(h * self._dy)
k_x = self._k_x * w
k_y = self._k_y * h
fx = w
fy = w
cx = w * 0.5 + dx
cy = h * 0.5 + dy
camera_matrix = | np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]], dtype=np.float32) | numpy.array |
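`PerspectiveDistortion.get_mtx` above jitters the four image corners inward by up to `max_warp` of the shorter side and maps them back to the full frame. A standalone hedged sketch of the same idea in plain OpenCV, outside the `Operation`/`perform_randomly` machinery; the `random_perspective` name and defaults are illustrative:

```python
import random
import cv2
import numpy as np

def random_perspective(img, max_warp=0.2):
    h, w = img.shape[:2]
    b = int(min(h, w) * max_warp)
    r = random.randint
    # Destination: the full image rectangle.
    dst = np.float32([[0, 0], [w - 1, 0], [0, h - 1], [w - 1, h - 1]])
    # Source: the four corners, each jittered inward by up to `b` pixels.
    src = np.float32([[r(0, b), r(0, b)],
                      [w - 1 - r(0, b), r(0, b)],
                      [r(0, b), h - 1 - r(0, b)],
                      [w - 1 - r(0, b), h - 1 - r(0, b)]])
    mtx = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, mtx, (w, h))
```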
import numpy as np
from scipy.constants import mu_0
# TODO: make this take a vector rather than a single frequency
def rTEfunfwd(n_layer, f, lamda, sig, chi, depth, HalfSwitch):
"""
Compute reflection coefficients for Transverse Electric (TE) mode.
Only one for loop for multiple layers.
Parameters
----------
n_layer : int
The number of layers
f : complex, ndarray
Frequency (Hz); size = (n_frequency x n_filter)
lamda : complex, ndarray
Hankel transform wavenumber (1/m); size = (n_frequency x n_filter)
sig: complex, ndarray
Conductivity (S/m); size = (n_layer x n_frequency x n_filter)
chi: complex, ndarray
Susceptibility (SI); size = (n_layer,)
depth: float, ndarray
Top boundary of the layers; size = (n_layer,)
HalfSwitch: bool
Switch for halfspace
Returns
-------
rTE: complex, ndarray
Reflection coefficients;
size = (n_frequency x n_filter)
"""
n_frequency, n_filter = lamda.shape
Mtemp00 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp10 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp01 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp11 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum00 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum10 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum01 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum11 = np.zeros((n_frequency, n_filter), dtype=complex)
thick = -np.diff(depth)
w = 2*np.pi*f
rTE = np.zeros((n_frequency, n_filter), dtype=complex)
utemp0 = np.zeros((n_frequency, n_filter), dtype=complex)
utemp1 = np.zeros((n_frequency, n_filter), dtype=complex)
const = np.zeros((n_frequency, n_filter), dtype=complex)
utemp0 = lamda
utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0, :, :])
const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
Mtemp00 = 0.5*(1+const)
Mtemp10 = 0.5*(1-const)
Mtemp01 = 0.5*(1-const)
Mtemp11 = 0.5*(1+const)
# maybe store these and reuse for sensitivity?
M00 = []
M10 = []
M01 = []
M11 = []
M0sum00 = Mtemp00
M0sum10 = Mtemp10
M0sum01 = Mtemp01
M0sum11 = Mtemp11
if HalfSwitch:
M1sum00 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum10 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum01 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum11 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum00 = M0sum00
M1sum10 = M0sum10
M1sum01 = M0sum01
M1sum11 = M0sum11
else:
for j in range(n_layer-1):
utemp0 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j])*sig[j, :, :])
utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[j+1])*sig[j+1, :, :])
const = mu_0*(1+chi[j])*utemp1/(mu_0*(1+chi[j+1])*utemp0)
h0 = thick[j]
Mtemp00 = 0.5*(1.+const)*np.exp(-2.*utemp0*h0)
Mtemp10 = 0.5*(1.-const)
Mtemp01 = 0.5*(1.-const)*np.exp(-2.*utemp0*h0)
Mtemp11 = 0.5*(1.+const)
M1sum00 = M0sum00*Mtemp00 + M0sum01*Mtemp10
M1sum10 = M0sum10*Mtemp00 + M0sum11*Mtemp10
M1sum01 = M0sum00*Mtemp01 + M0sum01*Mtemp11
M1sum11 = M0sum10*Mtemp01 + M0sum11*Mtemp11
M0sum00 = M1sum00
M0sum10 = M1sum10
M0sum01 = M1sum01
M0sum11 = M1sum11
rTE = M1sum01/M1sum11
return rTE
def matmul(a00, a10, a01, a11, b00, b10, b01, b11):
"""
Compute 2x2 matrix multiplication in a vectorized way
C = A*B
C = [a00 a01] * [b00 b01] = [c00 c01]
[a10 a11] [b10 b11] [c10 c11]
"""
c00 = a00*b00 + a01*b10
c10 = a10*b00 + a11*b10
c01 = a00*b01 + a01*b11
c11 = a10*b01 + a11*b11
return c00, c10, c01, c11
# TODO: make this take a vector rather than a single frequency
def rTEfunjac(n_layer, f, lamda, sig, chi, depth, HalfSwitch):
"""
Compute sensitivity of reflection coefficients for
Transverse Electric (TE) mode with regard to conductivity
Parameters
----------
n_layer : int
The number of layers
f : complex, ndarray
Frequency (Hz); size = (n_frequency x n_filter)
lamda : complex, ndarray
Hankel transform wavenumber (1/m); size = (n_frequency x n_filter)
sig: complex, ndarray
Conductivity (S/m); size = (n_layer x 1)
chi: complex, ndarray
Susceptibility (SI); size = (n_layer x 1)
depth: float, ndarray
Top boundary of the layers
HalfSwitch: bool
Switch for halfspace
Returns
-------
drTE: complex, ndarray
Derivatives of the reflection coefficients with respect to conductivity;
size = (n_layer x n_frequency x n_filter)
"""
# Initializing arrays
n_frequency, n_filter = lamda.shape
Mtemp00 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp10 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp01 = np.zeros((n_frequency, n_filter), dtype=complex)
Mtemp11 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum00 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum10 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum01 = np.zeros((n_frequency, n_filter), dtype=complex)
M1sum11 = np.zeros((n_frequency, n_filter), dtype=complex)
M0sum00 = np.zeros((n_frequency, n_filter), dtype=complex)
M0sum10 = np.zeros((n_frequency, n_filter), dtype=complex)
M0sum01 = np.zeros((n_frequency, n_filter), dtype=complex)
M0sum11 = np.zeros((n_frequency, n_filter), dtype=complex)
dMtemp00 = np.zeros((n_frequency, n_filter), dtype=complex)
dMtemp10 = np.zeros((n_frequency, n_filter), dtype=complex)
dMtemp01 = np.zeros((n_frequency, n_filter), dtype=complex)
dMtemp11 = np.zeros((n_frequency, n_filter), dtype=complex)
dj0temp00 = np.zeros((n_frequency, n_filter), dtype=complex)
dj0temp10 = np.zeros((n_frequency, n_filter), dtype=complex)
dj0temp01 = np.zeros((n_frequency, n_filter), dtype=complex)
dj0temp11 = np.zeros((n_frequency, n_filter), dtype=complex)
dj1temp00 = np.zeros((n_frequency, n_filter), dtype=complex)
dj1temp10 = np.zeros((n_frequency, n_filter), dtype=complex)
dj1temp01 = np.zeros((n_frequency, n_filter), dtype=complex)
dj1temp11 = np.zeros((n_frequency, n_filter), dtype=complex)
thick = -np.diff(depth)
w = 2*np.pi*f
rTE = np.zeros((n_frequency, n_filter), dtype=complex)
drTE = np.zeros((n_layer, n_frequency, n_filter), dtype=complex)
utemp0 = np.zeros((n_frequency, n_filter), dtype=complex)
utemp1 = np.zeros((n_frequency, n_filter), dtype=complex)
const = np.zeros((n_frequency, n_filter), dtype=complex)
utemp0 = lamda
utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0, :, :])
const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
# Compute M1
Mtemp00 = 0.5*(1+const)
Mtemp10 = 0.5*(1-const)
Mtemp01 = 0.5*(1-const)
Mtemp11 = 0.5*(1+const)
utemp0 = lamda
utemp1 = np.sqrt(lamda**2+1j*w*mu_0*(1+chi[0])*sig[0, :, :])
const = mu_0*utemp1/(mu_0*(1+chi[0])*utemp0)
# Compute dM1du1
dj0Mtemp00 = 0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
dj0Mtemp10 = -0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
dj0Mtemp01 = -0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
dj0Mtemp11 = 0.5*(mu_0/(mu_0*(1+chi[0])*utemp0))
# TODO: for computing Jacobian
M00 = []
M10 = []
M01 = []
M11 = []
dJ00 = []
dJ10 = []
dJ01 = []
dJ11 = []
M00.append(Mtemp00)
M01.append(Mtemp01)
M10.append(Mtemp10)
M11.append(Mtemp11)
M0sum00 = Mtemp00.copy()
M0sum10 = Mtemp10.copy()
M0sum01 = Mtemp01.copy()
M0sum11 = Mtemp11.copy()
if HalfSwitch or n_layer == 1:
M1sum00 = M0sum00.copy()
M1sum10 = M0sum10.copy()
M1sum01 = M0sum01.copy()
M1sum11 = M0sum11.copy()
else:
for j in range(n_layer-1):
dJ_10Mtemp00 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ_10Mtemp10 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ_10Mtemp01 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ_10Mtemp11 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ01Mtemp00 = np.zeros((n_frequency, n_filter), dtype=complex)
dJ01Mtemp10 = | np.zeros((n_frequency, n_filter), dtype=complex) | numpy.zeros |
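For reference, the propagator-matrix recursion that both `rTEfunfwd` and `rTEfunjac` implement can be written compactly. This is a hedged transcription of the code above (with h_j the layer thickness, and chi = 0, u_0 = lamda for the air half-space at the first interface), not an independent derivation; the subscripts on the product denote zero-based (row, column) entries:

```latex
u_j = \sqrt{\lambda^2 + i\,\omega\,\mu_0\,(1+\chi_j)\,\sigma_j},
\qquad
c_j = \frac{\mu_0\,(1+\chi_j)\,u_{j+1}}{\mu_0\,(1+\chi_{j+1})\,u_j},
\qquad
M_j = \tfrac{1}{2}
\begin{pmatrix}
(1+c_j)\,e^{-2 u_j h_j} & (1-c_j)\,e^{-2 u_j h_j} \\
(1-c_j) & (1+c_j)
\end{pmatrix},
\qquad
r_{TE} = \frac{\left(\prod_j M_j\right)_{01}}{\left(\prod_j M_j\right)_{11}}
```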
from __future__ import print_function
import numpy as np
from openmdao.api import ExplicitComponent
class EvalVelocities(ExplicitComponent):
def initialize(self):
self.options.declare('surfaces', types=list)
self.options.declare('eval_name', types=str)
self.options.declare('num_eval_points', types=int)
def setup(self):
surfaces = self.options['surfaces']
eval_name = self.options['eval_name']
num_eval_points = self.options['num_eval_points']
system_size = 0
for surface in surfaces:
ny = surface['num_y']
nx = surface['num_x']
name = surface['name']
system_size += (nx - 1) * (ny - 1)
self.system_size = system_size
velocities_name = '{}_velocities'.format(eval_name)
self.add_input('inflow_velocities', shape=(system_size, 3), units='m/s')
self.add_input('circulations', shape=system_size, units='m**2/s')
self.add_output(velocities_name, shape=(num_eval_points, 3), units='m/s')
circulations_indices = np.arange(system_size)
velocities_indices = np.arange(num_eval_points * 3).reshape((num_eval_points, 3))
self.declare_partials(velocities_name, 'circulations',
rows=np.einsum('ik,j->ijk',
velocities_indices, np.ones(system_size, int)).flatten(),
cols=np.einsum('ik,j->ijk',
np.ones((num_eval_points, 3), int), circulations_indices).flatten(),
)
self.declare_partials(velocities_name, 'inflow_velocities', val=1.,
rows=np.arange(3 * num_eval_points),
cols=np.arange(3 * num_eval_points),
)
ind_1 = 0
ind_2 = 0
for surface in surfaces:
ny = surface['num_y']
nx = surface['num_x']
name = surface['name']
num = (nx - 1) * (ny - 1)
ind_2 += num
vel_mtx_name = '{}_{}_vel_mtx'.format(name, eval_name)
self.add_input(vel_mtx_name,
shape=(num_eval_points, nx - 1, ny - 1, 3), units='1/m')
vel_mtx_indices = np.arange(num_eval_points * num * 3).reshape(
(num_eval_points, num, 3))
self.declare_partials(velocities_name, vel_mtx_name,
rows=np.einsum('ik,j->ijk', velocities_indices, np.ones(num, int)).flatten(),
cols=vel_mtx_indices.flatten(),
)
ind_1 += num
def compute(self, inputs, outputs):
surfaces = self.options['surfaces']
eval_name = self.options['eval_name']
num_eval_points = self.options['num_eval_points']
system_size = self.system_size
velocities_name = '{}_velocities'.format(eval_name)
outputs[velocities_name] = inputs['inflow_velocities']
ind_1 = 0
ind_2 = 0
for surface in surfaces:
ny = surface['num_y']
nx = surface['num_x']
name = surface['name']
num = (nx - 1) * (ny - 1)
ind_2 += num
vel_mtx_name = '{}_{}_vel_mtx'.format(name, eval_name)
outputs[velocities_name] += np.einsum('ijk,j->ik',
inputs[vel_mtx_name].reshape((num_eval_points, num, 3)),
inputs['circulations'][ind_1:ind_2],
)
tmp = np.einsum('ijk,j->ik',
inputs[vel_mtx_name].reshape((num_eval_points, num, 3)),
inputs['circulations'][ind_1:ind_2],
)
ind_1 += num
def compute_partials(self, inputs, partials):
surfaces = self.options['surfaces']
eval_name = self.options['eval_name']
num_eval_points = self.options['num_eval_points']
system_size = self.system_size
velocities_name = '{}_velocities'.format(eval_name)
dv_dcirc = np.zeros((num_eval_points, system_size, 3))
ind_1 = 0
ind_2 = 0
for surface in surfaces:
ny = surface['num_y']
nx = surface['num_x']
name = surface['name']
num = (nx - 1) * (ny - 1)
ind_2 += num
vel_mtx_name = '{}_{}_vel_mtx'.format(name, eval_name)
partials[velocities_name, vel_mtx_name] = np.einsum('ijk,j->ijk',
| np.ones((num_eval_points, num, 3)) | numpy.ones |
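`compute` and `compute_partials` above lean on `np.einsum('ijk,j->ik', ...)` to accumulate each panel's circulation-weighted induced velocity onto the evaluation points. A tiny hedged sketch of just that contraction, with illustrative shapes, to make the index convention explicit:

```python
import numpy as np

num_eval_points, num_panels = 4, 3
vel_mtx = np.random.rand(num_eval_points, num_panels, 3)   # AIC-style matrix, 1/m
circulations = np.random.rand(num_panels)                  # m**2/s

induced = np.einsum('ijk,j->ik', vel_mtx, circulations)    # (num_eval_points, 3)

# Equivalent loop form: sum over panels of vel_mtx[:, j, :] * circulations[j].
check = sum(vel_mtx[:, j, :] * circulations[j] for j in range(num_panels))
assert np.allclose(induced, check)
```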
import functools
import itertools
import joblib
import numba
import numpy as np
def calculate_cic(
y00, y01, y10, y11, quantiles=np.linspace(.1, .9, 9), moments=None,
n_bootstraps=99, n_draws=1000, n_jobs=None, use_corrections=False
):
r"""
Estimates a model using the Changes-In-Changes estimator.
This function estimates non-linear treatments effects using
the Athey & Imbens [1]_ changes-in-changes estimator. Only
the case with continuous data is supported. `calculate_cic`
calculates quantile treatment effects, as well as treatment
effects on arbitrary user-specified functions of the distribution.
The latter are calculated by simulating from the counterfactual
distribution. Standard errors are computed using the bootstrap.
Parameters
----------
y00 : array_like
Observations in the untreated group before the intervention.
y01 : array_like
Observations in the untreated group after the intervention.
y10 : array_like
Observations in the treated group before the intervention.
y11 : array_like
Observations in the treated group after the intervention.
quantiles : array_like, optional
Quantiles at which quantile treatment effects are calculated.
Default is {.1, .2, ..., .9}.
moments : sequence, optional
A sequence of user-defined functions. These functions can be
used to calculate the treatment effect on arbitrary "moments"
of the distribution. Every element should take a single argument,
which is a (simulated or observed) sample for which the
moment should be calculated, and should return a single number,
which is the calculated moment.
n_bootstraps : int, optional
Number of bootstrap simulations to calculate standard errors.
Set to 0 to prevent bootstrapping. In this case, calculated
standard errors will equal zero.
n_draws : int, optional
Number of draws from the counterfactual distribution to
calculate the treatment effect of user-supplied moments. Only
relevant if ``moments is not None``.
n_jobs : int, optional
Number of parallel jobs to use for bootstrapping standard
errors. When None (the default), bootstrap serially. Otherwise,
this interpreted in as in joblib.Parallel, i.e. specify a
positive int to run that many jobs and a negative int to run
``num_cpu + 1 + n_jobs`` jobs.
use_corrections : bool, optional
Use numerical corrections when calculating CDF's and inverse
CDF's as in the original code by Athey & Imbens. Set to
True to obtain quantile treatment effects that are numerically
equivalent to those calculated using the original codes. In
general, using this parameter is unnecessary and discouraged,
see the notes below.
Returns
-------
estimated_quantile_effects : array_like
Estimated quantile treatment effects for every point
in `quantiles` in order.
bootstrap_quantile_se : array_like
Bootstrapped standard errors for every point in
`quantiles` in order.
estimated_moment_effects : array_like, optional
Estimated treatment effects for every function
specified in `moments`. Only returned when
``moments is not None``.
bootstrap_moment_se : array_like, optional
Bootstrapped standard errors for every function
specified in `moments`. Only returned when
``moments is not None``.
Notes
-----
`calculate_cic` calculates the exact counterfactual distribution,
:math:`\bar{F}_{11}`, using
.. math:: \bar{F}_{11}(x) = \hat{F}_{10}(\hat{F}_{00}^{-1}(\hat{F}_{01}(x))),
where :math:`\hat{F}_{it}` is the empirical cdf of group :math:`i` in
period :math:`t`.
Quantile treatment effects are calculated using the generalized
inverse cdf:
.. math:: \begin{align*}
F^{-1}(p) &= \inf\{x \in \mathbb{R} : F(x) \ge p\} \\
\text{qte}(p) &= \hat{F}_{11}^{-1}(p) - \bar{F}_{11}^{-1}(p)
\end{align*}
Moments are calculated by using a probability integral transform
to sample from :math:`\bar{F}_{11}`.
In general, `calculate_cic` does not give numerically equivalent
results to the original code used by Athey & Imbens [2]_. The
reason is that their code makes small numerical adjustments when
calculating cdf's and inverse cdf's. In particular, they calculate
cdf's as :math:`F(x) = P(X \le x + 0.00001)` and inverse cdf's as
:math:`F^{-1}(x) = \inf\{x : F(x) \ge p - 0.000001\}`. (Note that
the correction factors differ by a factor of 10!) According to the
comments, this is to "prevent numerical problems". Presumably,
this is because in general floating point comparison is a bad
idea. However, because during the calculation of counterfactual
cdf's floats are only copied and not modified, there is no
reason to be afraid of floating point comparisons in this case.
Nevertheless, the user can provide the parameter `use_corrections`
to use the same corrections as the Athey & Imbey codes. In this case,
the calculated quantile treatment effects will be numerically
equivalent. This option is included mainly to enable unit testing,
users are discouraged from using it because it will in general
lead to slightly wrong results.
References
----------
.. [1] Athey, Susan, and <NAME>. Imbens. 2006. "Identification and
Inference in nonlinear difference‐in‐differences models."
*Econometrica* 74 (2): 431-497.
.. [2] Athey, Susan and <NAME>ens. 2006. "CIC Code".
Accessed April 11, 2019. https://athey.people.stanford.edu/research.
"""
if use_corrections:
cdf_corr = 0.00001
inv_corr = 0.000001
else:
cdf_corr = inv_corr = 0
# Use the same draws for calculating moments during effect size
# calculation as during bootstrapping
draws = np.random.uniform(size=n_draws)
# Quantiles and draws need to be sorted for get_quantiles()
quantiles.sort()
draws.sort()
estimated_quantile_effects, estimated_moment_effects = calculate_effects(
y00, y01, y10, y11, quantiles, moments, draws, cdf_corr, inv_corr)
# Bootstrap standard errors
if n_jobs is None:
# Run sequentially
bootstrap_quantile_eff, bootstrap_moment_eff = zip(*map(
lambda _: bootstrap_sample(y00, y01, y10, y11, quantiles, moments,
draws, cdf_corr, inv_corr),
range(n_bootstraps)))
else:
# Run on multiple cores
# Use threads as the backend since most of the time will be
# spent in NumPy routines, which release the GIL
ret = joblib.Parallel(n_jobs=n_jobs, prefer='threads')(
joblib.delayed(bootstrap_sample)(y00, y01, y10, y11, quantiles,
moments, draws, cdf_corr,
inv_corr)
for _ in range(n_bootstraps)
)
bootstrap_quantile_eff, bootstrap_moment_eff = zip(*ret)
# Concatenate into a single numpy array
bootstrap_quantile_eff = np.concatenate([
x[np.newaxis] for x in bootstrap_quantile_eff
], axis=0)
bootstrap_moment_eff = np.concatenate([
x[np.newaxis] for x in bootstrap_moment_eff
], axis=0)
if n_bootstraps > 0:
bootstrap_quantile_se = np.std(bootstrap_quantile_eff, axis=0)
bootstrap_moment_se = np.std(bootstrap_moment_eff, axis=0)
else:
bootstrap_quantile_se = np.zeros(quantiles.shape[0])
n_moments = len(moments) if moments is not None else 0
bootstrap_moment_se = np.zeros(n_moments)
if moments is None:
return estimated_quantile_effects, bootstrap_quantile_se
else:
return (estimated_quantile_effects, bootstrap_quantile_se,
estimated_moment_effects, bootstrap_moment_se)
class CICModel:
def __init__(
self, y, g, t, treat, quantiles=np.linspace(.1, .9, 9), moments=None,
n_draws=1000, n_bootstraps=99, n_jobs=None
):
self.quantiles = quantiles
n_obs = y.shape[0]
n_groups = treat.shape[0]
n_periods = treat.shape[1]
if t.shape[0] != n_obs:
raise ValueError('len(y) should equal len(t).')
if g.shape[0] != n_obs:
raise ValueError('len(y) should equal len(g).')
if t.max() >= n_periods:
raise ValueError('Invalid period provided for some observations.')
if g.max() >= n_groups:
raise ValueError('Invalid group provided for some observations.')
if np.any((~treat[:, 1:]) & (treat[:, 1:] ^ treat[:, :-1])):
raise ValueError('A group cannot become untreated after becoming'
' treated.')
self.g = g
self.t = t
# Use the same draws for calculating moments during effect size
# calculation as during bootstrapping
draws = np.random.uniform(size=n_draws)
# Quantiles and draws need to be sorted for get_quantiles()
quantiles.sort()
draws.sort()
# Calculate the effect using all possible combinations of treatment
# and control
possible_combinations = tuple(filter(
lambda x: valid_combination(treat, *x),
itertools.product(range(n_groups), range(n_periods), repeat=2)))
self.effects = calculate_multiple_effects(
y, g, t, possible_combinations, quantiles, moments, draws)
# Bootstrap the covariance matrix of the treatments effects
calc_bootstrap = functools.partial(
self._bootstrap_multiple_effects, y, g, t, treat, quantiles,
moments, possible_combinations, draws
)
if n_jobs is None:
bootstrap_effects = np.empty((n_bootstraps, self.effects.shape[0],
self.effects.shape[1]))
for i in range(n_bootstraps):
bootstrap_effects[i] = calc_bootstrap()
else:
bootstrap_effects = joblib.Parallel(n_jobs, prefer='threads', verbose=11)(
joblib.delayed(calc_bootstrap)() for _ in range(n_bootstraps)
)
# bootstrap_effects is a list of ndarray's, make it a single
# ndarray
bootstrap_effects = np.concatenate([
x[np.newaxis] for x in bootstrap_effects
], axis=0)
# Calculate the combined effect
self.n_treatment_effects = treat.sum()
self.treatment_for = np.empty((self.n_treatment_effects, 2))
# The matrix A maps `effects` into the (g, t)-treatment effect
self.A = np.zeros((len(possible_combinations),
self.n_treatment_effects))
i = 0
for g1, t1 in itertools.product(range(n_groups), range(n_periods)):
if treat[g1, t1]:
self.A[:, i] = tuple(map(
lambda x: x[2] == g1 and x[3] == t1,
possible_combinations
))
self.treatment_for[i] = g1, t1
i += 1
effect = np.empty((self.n_treatment_effects, self.effects.shape[1]))
effect_se = np.empty_like(effect)
self.cov_inv = np.empty((self.effects.shape[1],
len(possible_combinations),
len(possible_combinations)))
for effect_ind in range(self.effects.shape[1]):
# TODO: The covariance of the bootstrap sample is not necessarily a
# good estimator of the covariance matrix! Perhaps try also using
# the percentile method. See Machado, <NAME>. and <NAME>.
# 2005. "Bootstrap estimation of covariance matrices via the
# percentile method." Econometrics Journal 8: 70-78.
cov = np.cov(bootstrap_effects[:, :, effect_ind], rowvar=False,
bias=True)
if self.effects.shape[0] == 1:
# In this case np.cov() returns a scalar. Invert it and make
# it a matrix.
cov_inv = (1 / cov)[np.newaxis, np.newaxis]
else:
cov_inv = np.linalg.pinv(cov)
self.cov_inv[effect_ind] = cov_inv
effect[:, effect_ind] = np.linalg.solve(
self.A.T @ cov_inv @ self.A,
self.A.T @ cov_inv @ self.effects[:, effect_ind])
effect_cov = np.linalg.inv(self.A.T @ cov_inv @ self.A)
effect_se[:, effect_ind] = np.sqrt(np.diag(effect_cov))
self.all_effect = effect
self.all_se = effect_se
self.quantile_effect = effect[:, :quantiles.shape[0]]
self.quantile_se = effect_se[:, :quantiles.shape[0]]
if moments is None:
self.moment_effect = np.empty((self.n_treatment_effects, 0))
self.moment_se = np.empty_like(self.moment_effect)
else:
self.moment_effect = effect[:, quantiles.shape[0]:]
self.moment_se = effect_se[:, quantiles.shape[0]:]
def treatment_quantile(self, g, t):
ind = self._treatment_ind(g, t)
return self.quantile_effect[ind], self.quantile_se[ind]
def treatment_moment(self, g, t):
ind = self._treatment_ind(g, t)
return self.moment_effect[ind], self.moment_se[ind]
def _treatment_ind(self, g, t):
row_match = (self.treatment_for == np.array([g, t])).all(axis=1)
return np.nonzero(row_match)[0][0]
def test_model_based_on_quantile(self, quantile_ind):
effects_ind = quantile_ind
mean_diff = (self.effects[:, effects_ind] -
self.A @ self.quantile_effect[:, quantile_ind])
cov_inv = self.cov_inv[effects_ind]
test_stat = mean_diff.T @ cov_inv @ mean_diff
# We need the rank of V here, and cov is the pseudo-inverse of V.
# However, the rank of the pseudo-inverse is the same as of the
# original matrix so there is no problem here.
rank_dist = (np.linalg.matrix_rank(cov_inv) -
self.n_treatment_effects)
return test_stat, rank_dist
def test_model_based_on_moment(self, moment_ind):
effects_ind = moment_ind + self.quantiles.shape[0]
mean_diff = (self.effects[:, effects_ind] -
self.A @ self.moment_effect[:, moment_ind])
cov_inv = self.cov_inv[effects_ind]
test_stat = mean_diff.T @ cov_inv @ mean_diff
# We need the rank of V here, and cov is the pseudo-inverse of V.
# However, the rank of the pseudo-inverse is the same as of the
# original matrix so there is no problem here.
rank_dist = (np.linalg.matrix_rank(cov_inv) -
self.n_treatment_effects)
return test_stat, rank_dist
def combine_effects(self, effects_for, weigh_by='n'):
n_effects = self.all_effect.shape[1]
weights = np.zeros((n_effects, self.n_treatment_effects, 1))
if weigh_by == 'n':
for i in range(self.n_treatment_effects):
g, t = self.treatment_for[i]
if (g, t) in effects_for:
weights[:, i] = np.sum((self.g == g) & (self.t == t))
weights /= weights[0].sum()
elif weigh_by == 'cov':
target = np.zeros((self.n_treatment_effects, 1),
dtype=np.bool_)
for i in range(self.n_treatment_effects):
g, t = self.treatment_for[i]
if (g, t) in effects_for:
target[i] = True
for effect_ind in range(n_effects):
weights[effect_ind] = np.linalg.solve(
target.T @ self.A.T @ self.cov_inv[effect_ind] @ self.A @ target,
target.T @ self.A.T @ self.cov_inv[effect_ind] @ self.A
).T
else:
raise ValueError('Invalid value for weigh_by, use n or cov.')
weighed_effects = np.empty(n_effects)
weighed_se = np.empty_like(weighed_effects)
for effect_ind in range(n_effects):
weighed_effects[effect_ind] = (weights[effect_ind].T @
self.all_effect[:, effect_ind])
weighed_cov = weights[effect_ind].T @ np.linalg.solve(
self.A.T @ self.cov_inv[effect_ind] @ self.A,
weights[effect_ind])
weighed_se[effect_ind] = np.sqrt(np.diag(weighed_cov))
quantile_combined = weighed_effects[:self.quantiles.shape[0]]
quantile_combined_se = weighed_se[:self.quantiles.shape[0]]
moment_combined = weighed_effects[self.quantiles.shape[0]:]
moment_combined_se = weighed_se[self.quantiles.shape[0]:]
return (quantile_combined, quantile_combined_se, moment_combined,
moment_combined_se)
def _bootstrap_multiple_effects(
self, y, g, t, treat, quantiles, moments, possible_combinations, draws
):
y_resample = np.empty_like(y)
n_groups = treat.shape[0]
n_periods = treat.shape[1]
for j, k in itertools.product(range(n_groups), range(n_periods)):
target = (g == j) & (t == k)
y_resample[target] = np.random.choice(y[target], target.sum(),
replace=True)
return calculate_multiple_effects(
y_resample, g, t, possible_combinations, quantiles, moments,
draws
)
def calculate_effects(
y00, y01, y10, y11, quantiles, moments, draws, cdf_corr, inv_corr
):
# Calculate quantile treatment effects
observed_quantiles = get_quantiles(*cdf_support(y11, cdf_corr),
quantiles, inv_corr)
cf_cdf, cf_support = calc_cf_cdf(y00, y01, y10, cdf_corr, inv_corr)
cf_quantiles = get_quantiles(cf_cdf, cf_support, quantiles, inv_corr)
quantile_effects = observed_quantiles - cf_quantiles
if moments is not None:
observed_moments = np.array([f(y11) for f in moments])
cf_draws = sample_from_cdf(cf_cdf, cf_support, draws)
cf_moments = np.array([f(cf_draws) for f in moments])
moment_effects = observed_moments - cf_moments
return quantile_effects, moment_effects
else:
return quantile_effects, None
def calculate_multiple_effects(
y, g, t, possible_combinations, quantiles, moments, draws
):
n_targets = quantiles.shape[0]
if moments is not None:
n_targets += len(moments)
effects = np.empty((len(possible_combinations), n_targets))
for i, (g0, t0, g1, t1) in enumerate(possible_combinations):
y00 = y[(g == g0) & (t == t0)]
y01 = y[(g == g0) & (t == t1)]
y10 = y[(g == g1) & (t == t0)]
y11 = y[(g == g1) & (t == t1)]
# calculate_effects returns None as second element if moments is None.
# When moments is not None, we want to concatenate the return elements.
effects[i] = np.concatenate([
x for x in
calculate_effects(y00, y01, y10, y11, quantiles, moments,
draws, 0, 0)
if x is not None])
return effects
def bootstrap_sample(
y00, y01, y10, y11, quantiles, moments, draws, cdf_corr, inv_corr
):
y00_resample = np.random.choice(y00, y00.shape[0], replace=True)
y01_resample = np.random.choice(y01, y01.shape[0], replace=True)
y10_resample = np.random.choice(y10, y10.shape[0], replace=True)
y11_resample = | np.random.choice(y11, y11.shape[0], replace=True) | numpy.random.choice |
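As a compact illustration of the counterfactual construction in the docstring above, here is a hedged, minimal NumPy sketch of the quantile treatment effect qte(p) = F̂11^{-1}(p) − F̄11^{-1}(p) built from plain empirical CDFs. It ignores the cdf_corr/inv_corr correction knobs and is not the module's own implementation:

```python
import numpy as np

def emp_cdf(sample, x):
    # Empirical cdf F(x) = P(X <= x).
    s = np.sort(sample)
    return np.searchsorted(s, x, side='right') / s.shape[0]

def emp_quantile(sample, p):
    # Generalized inverse cdf: inf{x : F(x) >= p}.
    s = np.sort(sample)
    k = np.ceil(np.asarray(p) * s.shape[0]).astype(int) - 1
    return s[np.clip(k, 0, s.shape[0] - 1)]

def cic_quantile_effect(y00, y01, y10, y11, p):
    # Counterfactual quantile F01^{-1}(F00(F10^{-1}(p))) follows from
    # inverting F_bar_11(x) = F10(F00^{-1}(F01(x))) as given in the docstring.
    counterfactual = emp_quantile(y01, emp_cdf(y00, emp_quantile(y10, p)))
    return emp_quantile(y11, p) - counterfactual
```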
#!/usr/bin/env python
"""
@package ion_functions.data.msp_functions
@file ion_functions/data/msp_functions.py
@author <NAME>
@brief Module containing MASSP instrument related functions and wrapper functions
MASSP L1 Data Products
Data Product Specification for Dissolved Gas Concentrations (DISSGAS) from the
MASSP Instrument. Document Control Number 1341-00240.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled
>> 1000 System Level >> 1341-00240_DPS_DISSGAS.pdf)
The OOI Level 1 Dissolved Gas Concentrations (DISSGAS) core data product is
produced by the MASSP instrument class. The data for the computation of this L1
core data product are derived from the Residual Gas Analyzer (RGA) integrated in
the MASSP instrument. The resulting L1 DISSGAS core data product is calculated
from the L0 Mass Spectral Intensities and the sample temperature, also measured
by the MASSP instrument, and is composed of the dissolved concentrations (uM) of
the individual gases: methane, ethane, hydrogen, argon, hydrogen sulfide, oxygen
and carbon dioxide. NOTE: For methane, the Nafion mode data is used, while for
the rest of the gases, Direct mode data is used.
Function Name L1 DP Name Description
calc_dissgas_smpmethcon DISSGAS-SMPMETHCON Dissolved Methane Conc. (uM) in Sample
calc_dissgas_smpethcon DISSGAS-SMPETHNCON Dissolved Ethane Conc. (uM) in Sample
calc_dissgas_smph2con DISSGAS-SMPH2CON Dissolved Hydrogen Conc. (uM) in Sample
calc_dissgas_smparcon DISSGAS-SMPARCON Dissolved Argon Conc. (uM) in Sample
calc_dissgas_smph2scon DISSGAS-SMPH2SCON Dissolved Hydrogen Sulfide Conc. (uM) in Sample
calc_dissgas_smpo2con DISSGAS-SMPO2CON Dissolved Oxygen Conc. (uM) in Sample
calc_dissgas_smpco2con DISSGAS-SMPCO2CON Dissolved Carbon Dioxide Conc. (uM) in Sample
calc_dissgas_bkgmethcon DISSGAS-BKGMETHCON Dissolved Methane Conc. (uM) in Background Water
calc_dissgas_bkgethcon DISSGAS-BKGETHNCON Dissolved Ethane Conc. (uM) in Background Water
calc_dissgas_bkgh2con DISSGAS-BKGH2CON Dissolved H2 Conc. (uM) in Background Water
calc_dissgas_bkgarcon DISSGAS-BKGARCON Dissolved AR Conc. (uM) in Background Water
calc_dissgas_bkgh2scon DISSGAS-BKGH2SCON Dissolved Hydrogen Sulfide Conc. (uM) in Background Water
calc_dissgas_bkgo2con DISSGAS-BKGO2CON Dissolved Oxygen Conc. (uM) in Background Water
calc_dissgas_bkgco2con DISSGAS-BKGCO2CON Dissolved Carbon Dioxide Conc. (uM) in Background Water
calc_dissgas_cal1methcon DISSGAS-CA1METHCON Dissolved Methane Conc. (uM) in Calibration Solution 1
calc_dissgas_cal1co2con DISSGAS-CA1CO2CON Dissolved Carbon Dioxide Conc. (uM) in Calibration Solution 1
calc_dissgas_cal2methcon DISSGAS-CA2METHCON Dissolved Methane Conc. (uM) in Calibration Solution 2
calc_dissgas_cal2co2con DISSGAS-CA2CO2CON Dissolved Carbon Dioxide Conc. (uM) in Calibration Solution 2
...................................................................................
The auxiliary data product MASSP Calibration Range (CALRANG) is the collection
of parameters associated with the quality status for each gas concentration. A
value of 0 indicates that both the intensity and temperature used are within the
calibration range. A value of -1 indicates that the intensity used was below the
minimum of the calibration range. A value of 1 indicates that the intensity was
higher than the maximum of the calibration range, but the temperature was within
the calibration range. A value of 2 indicates that the intensity was within the
calibration range, but that the temperature was above the calibration range. A
value of 3 indicates that both the intensity and the temperature were above the
calibration range.
Function Name AUX L1 DP Name Description
calc_calrang_smpmethcon CALRANG-SMPMETH Quality status for the Methane conc. in the sample water
calc_calrang_smpethcon CALRANG-SMPETHN Quality status for the Ethane conc. in the sample water
calc_calrang_smph2con CALRANG-SMPH2 Quality status for the Hydrogen conc. in the sample water
calc_calrang_smparcon CALRANG-SMPAR Quality status for the Argon conc. in the sample water
calc_calrang_smph2scon CALRANG-SMPH2S Quality status for the H2S conc. in the sample water
calc_calrang_smpo2con CALRANG-SMPO2 Quality status for the oxygen conc. in the sample water
calc_calrang_smpco2con CALRANG-SMPCO2 Quality status for the CO2 conc. in the sample water
calc_calrang_bkgmethcon CALRANG-BKGMETH Quality status for the Methane conc. in the background water
calc_calrang_bkgethcon CALRANG-BKGETHN Quality status for the Ethane conc. in the background water
calc_calrang_bkgh2con CALRANG-BKGH2 Quality status for the Hydrogen conc. in the background water
calc_calrang_bkgarcon CALRANG-BKGAR Quality status for the Argon conc. in the background water
calc_calrang_bkgh2scon CALRANG-BKGH2S Quality status for the H2S conc. in the background water
calc_calrang_bkgo2con CALRANG-BKGO2 Quality status for the oxygen conc. in the background water
calc_calrang_bkgco2con CALRANG-BKGCO2 Quality status for the CO2 conc. in the background water
calc_calrang_cal1methcon CALRANG-CAL1METH Quality status for the Methane conc. in the calibration fluid 1 water
calc_calrang_cal1co2con CALRANG-CAL1CO2 Quality status for the CO2 conc. in the calibration fluid 1 water
calc_calrang_cal2methcon CALRANG-CAL2METH Quality status for the Methane conc. in the calibration fluid 2 water
calc_calrang_cal2co2con CALRANG-CAL2CO2 Quality status for the CO2 conc. in the calibration fluid 2 water
...................................................................................
The auxiliary data product MASSP Time Stamp (TSTAMP) is the collection
of parameters associated with the time stamp for each gas concentration.
Function Name AUX L1 DP Name Description
calc_timestamp_smpmethcon TSTAMP-SMPMETH Time stamp for the Methane conc. in the sample water
calc_timestamp_smpethcon TSTAMP-SMPETHN Time stamp for the Ethane conc. in the sample water
calc_timestamp_smph2con TSTAMP-SMPH2 Time stamp for the Hydrogen conc. in the sample water
calc_timestamp_smparcon TSTAMP-SMPAR Time stamp for the Argon conc. in the sample water
calc_timestamp_smph2scon TSTAMP-SMPH2S Time stamp for the H2S conc. in the sample water
calc_timestamp_smpo2con TSTAMP-SMPO2 Time stamp for the oxygen conc. in the sample water
calc_timestamp_smpco2con TSTAMP-SMPCO2 Time stamp for the CO2 conc. in the sample water
calc_timestamp_bkgmethcon TSTAMP-BKGMETH Time stamp for the Methane conc. in the background water
calc_timestamp_bkgethcon TSTAMP-BKGETHN Time stamp for the Ethane conc. in the background water
calc_timestamp_bkgh2con TSTAMP-BKGH2 Time stamp for the Hydrogen conc. in the background water
calc_timestamp_bkgarcon TSTAMP-BKGAR Time stamp for the Argon conc. in the background water
calc_timestamp_bkgh2scon TSTAMP-BKGH2S Time stamp for the H2S conc. in the background water
calc_timestamp_bkgo2con TSTAMP-BKGO2 Time stamp for the oxygen conc. in the background water
calc_timestamp_bkgco2con TSTAMP-BKGCO2 Time stamp for the CO2 conc. in the background water
calc_timestamp_cal1methcon TSTAMP-CAL1METH Time stamp for the Methane conc. in the calibration fluid 1 water
calc_timestamp_cal1co2con TSTAMP-CAL1CO2 Time stamp for the CO2 conc. in the calibration fluid 1 water
calc_timestamp_cal2methcon TSTAMP-CAL2METH Time stamp for the Methane conc. in the calibration fluid 2 water
calc_timestamp_cal2co2con TSTAMP-CAL2CO2 Time stamp for the CO2 conc. in the calibration fluid 2 water
...................................................................................
Functions that calculate all additional L1 auxiliary data products are listed below.
The auxiliary data product MASSP Sample Inlet (MSINLET) is the collection of
parameters associated with measurement of sample properties at the time of gas
equilibration across the gas permeable membrane.
Function Name AUX L1 DP Name Description
calc_msinlet_smpphint MSINLET-SMPPHINT Sample pH Intensity
calc_msinlet_smpphint_timestamp TSTAMP-SMPPHINT Time stamp of Sample pH Intensity
calc_msinlet_bkgphint MSINLET-BKGPHINT Background Water pH Intensity
calc_msinlet_bkgphint_timestamp TSTAMP-BKGPHINT Time stamp of Background Water pH Intensity
calc_msinlet_cal1phint MSINLET-CA1PHINT Calibration Solution 1 pH Intensity
calc_msinlet_cal1phint_timestamp TSTAMP-CA1PHINT Time stamp of Calibration Solution 1 pH Intensity
calc_msinlet_cal2phint MSINLET-CA2PHINT Calibration Solution 2 pH Intensity
calc_msinlet_cal1phint_timestamp TSTAMP-CA2PHINT Time stamp of Calibration Solution 2 pH Intensity
calc_smpnafeff NAFEFF Nafion Drier Efficiency
calc_smpnafeff_timestamp TSTAMP-NAFEFF Time stamp of Nafion Drier Efficiency
...................................................................................
...................................................................................
...................................................................................
MASSP L2 Data Products
Data Product Specification for Dissolved Gas Concentrations (TOTLGAS) from the
MASSP Instrument. Document Control Number 1341-00XXX.
https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI >> Controlled
>> 1000 System Level >> 1341-00XXX_DPS_TOTLGAS.pdf)
The OOI Level 2 Dissolved Gas Concentration (DISSGAS) core data product is
produced by the MASSP instrument class. The data for the computation of this L2
core data product are derived from the L1 core data product DISSGAS. The
resulting L2 TOTLGAS core data product is calculated from the individual
dissolved gas concentrations, the inlet fluid temperature, and the pH of the
fluid also measured by the MASSP instrument, and is composed of the total
concentrations (uM) of the individual gases: hydrogen sulfide and carbon
dioxide.
Function Name L2 DP Name Description
calc_l2_totlgas_smph2scon TOTLGAS-SMPH2SCON Total Hydrogen Sulfide Conc. (uM) in Sample Water
calc_l2_totlgas_smpco2con TOTLGAS-SMPCO2CON Total Carbon Dioxide Conc. (uM) in Sample Water
calc_l2_totlgas_bkgh2scon TOTLGAS-BKGH2SCON Total Hydrogen Sulfide Conc. (uM) in Background Water
calc_l2_totlgas_bkgco2con TOTLGAS-BKGCO2CON Total Carbon Dioxide Conc. (uM) in Background Water
...................................................................................
The auxiliary data product MASSP Time Stamp (TSTAMP) is the collection
of parameters associated with the time stamp for each gas concentration.
Function Name AUX L2 DP Name Description
calc_timestamp_totlgas_smph2scon TSTAMP-SMPH2SCON Time stamp for the total H2S conc. in the sample water
calc_timestamp_totlgas_smpco2con TSTAMP-SMPCO2CON Time stamp for the total CO2 conc. in the sample water
calc_timestamp_totlgas_bkgh2scon TSTAMP-BKGH2SCON Time stamp for the total H2S conc. in the background water
calc_timestamp_totlgas_bkgco2con TSTAMP-BKGCO2CON Time stamp for the total CO2 conc. in the background water
...................................................................................
Functions that calculate all additional L2 auxiliary data products are listed below:
The auxiliary data product MASSP Equilibrated Water (MSWATER) is the collection
of higher level products describing the pH state of the sampled and background
water at equilibration and measurement by the Residual Gas Analyzer (RGA),
onboard the MASSP instrument. These functions are required to calculate the above
L2 data products namely TOTLGAS-SMPH2SCON, TOTLGAS-SMPCO2CON, TOTLGAS-BKGH2SCON,
and TOTLGAS-BKGCO2CON.
Function Name AUX L2 DP Name Description
calc_l2_mswater_smpphval MSWATER-SMPPHVAL Mass Spectrometer Equilibrated Sample Water pH Value
calc_l2_mswater_bkgphval MSWATER-BKGPHVAL Mass Spectrometer Equilibrated Background Water pH Value
...................................................................................
...................................................................................
...................................................................................
The core functions used by all of the wrapper functions described above are listed below. Note that
the mass-to-charge ratio is denoted as mz.
Function Name Description
SamplePreProcess This subroutine takes in the SAMPLEINT array and produces intermediary
variables sample-mz2, sample-mz18, sample-mz30, sample-mz32,
sample-mz40, sample-mz44, sample-mz15 and sample-mz18Naf, sample-Tnaf,
sample-Tdir as well as MSINLET-SMPPHINT AUX data products.
BackgroundPreProcess This subroutine takes in the BKGNDINT array and produces intermediary
variables bckgnd-mz2, bckgnd-mz30, bckgnd-mz32, bckgnd-mz40,
bckgnd-mz44, bckgnd-mz15, bckgnd-Tnaf, bckgnd-Tdir, as well as
MSINLET-BKGPHINT AUX data products.
Cal1PreProcess This subroutine takes in the DISSGAS-CALINT01 array and produces intermediary
variables cal1-mz44, cal1-mz15, cal1-Tnaf, cal1-Tdir as well as
MSINLET-CA1PHINT AUX data product.
Cal2PreProcess This subroutine takes in the DISSGAS-CALINT02 array and produces intermediary
variables cal2-mz44, cal2-mz15, cal2-Tnaf, cal2-Tdir as well as
MSINLET-CA2PHINT AUX data product.
gas_concentration This sub-routine takes in a column range from DPS Table 1 (referred
to as c1, c2, c3, c4), a corrected intensity (referred to as x, from the
Deconvolution subroutine), an averaged temperature (referred to as T, see DPS Table 1),
the pressure P of the sampling site, and calculates the final concentration used
for L1 DISSGAS data products. This subroutine also assigns a value to the
corresponding CALRANG parameter (see DPS Table 1) identifying the quality of the
concentration value (indicating whether it is out of calibration range for
concentration and/or temperature). The subroutine also uses a temporary
variable, tempCalRang, used to compute the final value of CALRANG.
average_mz This subroutine takes in an mz value as parameter (M), parameter w from the calibration
table, and a subset of n scans.
deconvolution_correction This sub-routine takes in a main variable (see DPS Table 1), a second variable
(see DPS Table 1), and a calibration lookup table (DPS Table 2) column
range (see Table 1).
GasModeDetermination Takes in the values of sample_valve1, sample_valve2, sample_valve3, and
sample_valve4 and returns the GASMODE array.
SmpModeDetermination Takes in the values of external_valve1_status, external_valve2_status,
external_valve3_status, external_valve4_status, and external_valve5_status
and returns the SMPMODE array.
"""
# import main python modules
import numpy as np
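#Note (illustrative, not part of the DPS): the L2 wrapper functions below all convert
#the deployment depth of the sensor (meters) to an absolute pressure (psi) with the
#same inline expression. The small hypothetical helper below restates that conversion
#on its own, assuming roughly 0.099204 atm of hydrostatic pressure per meter of
#seawater plus 1 atm at the surface, and 14.695 psi per atm.
def _example_depth_to_psi(sensor_depth):
    '''
    Hypothetical helper, for illustration only: convert sensor depth (m) to the
    absolute pressure (psi) used inline by the L2 TOTLGAS calculations.
    '''
    return (sensor_depth * 0.099204 + 1) * 14.695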
#Block of functions that calculate the L2 data products
def calc_l2_totlgas_smph2scon(port_timestamp_sampleint, L0_dissgas_sampleint,
gas_mode_sampleint, port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table,
l2_ph_calibration_table, sensor_depth, salinity):
'''
Below are the steps for calculating L2 TOTLGAS- SMPH2SCON and L2 TOTLGAS-
BKGH2SCON core data products from L1 DISSGAS-SMPH2SCON and
DISSGAS-BKGH2SCON, the L1 auxiliary data MSINLET-TEMP, the pressure at the
site P and the value S from the calibration table, and the higher auxiliary
data products MSWATER-SMPPHVAL and MSWATER-BKGPHVAL.
'''
ph_temp_array = calc_l2_mswater_smpphval(port_timestamp_sampleint,
L0_dissgas_sampleint,
gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table,
l2_ph_calibration_table)
t = ph_temp_array[1]
ph = ph_temp_array[0]
smph2scon = calc_dissgas_smph2scon(port_timestamp_sampleint,
L0_dissgas_sampleint,
gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table, sensor_depth)
#Convert depth (meters) to pressure (psi)
pressure = (sensor_depth * 0.099204 + 1) * 14.695
#estimated salinity == 35
PSU = salinity
k1T = 10**(-19.83 - (930.8 / (t+273.15)) + (2.8 * np.log(t + 273.15)) -
(np.sqrt(PSU) * (-0.2391 + 35.685 / (t+273.15))) - (PSU * (0.0109 - (0.3776
/ (t+273.15)))))
r = ((11.07 + 0.009 * t + 0.000942 * t**2) * 0.0689475729 * pressure +
(-6.869 * 10**(-6) + 1.2835 * 10**(-7) * t) * pressure**2) / ((t+273.15) * 83.131)
k1 = np.exp(r) * k1T
beta = 1 + (k1 / 10**-ph)
totlgas_smph2scon = beta * smph2scon
return totlgas_smph2scon
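#Illustrative note (not part of the DPS): the factor beta above is the sulfide
#speciation correction beta = 1 + K1/[H+], where K1 is the first dissociation
#constant of H2S corrected for salinity, temperature, and pressure. Multiplying the
#L1 free-H2S concentration by beta therefore yields the total dissolved sulfide
#(H2S + HS-). At seawater pH values above the H2S pK1 (roughly 7), beta grows
#rapidly and most of the total sulfide is carried as HS-.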
def calc_l2_totlgas_smpco2con(port_timestamp_sampleint, L0_dissgas_sampleint,
gas_mode_sampleint, port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table,
l2_ph_calibration_table, sensor_depth, salinity):
'''
Below are the steps for calculating L2 TOTLGAS- SMPCO2CON and L2 TOTLGAS-
BKGCO2CON core data products from L1 DISSGAS-SMPCO2CON and
DISSGAS-BKGCO2CON, the L1 auxiliary data MSINLET-TEMP, the pressure at the
site P and the value S from the calibration table, and the higher auxiliary
data products MSWATER-SMPPHVAL and MSWATER-BKGPHVAL.
'''
ph_temp_array = calc_l2_mswater_smpphval(port_timestamp_sampleint,
L0_dissgas_sampleint,
gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table,
l2_ph_calibration_table)
t = ph_temp_array[1]
ph = ph_temp_array[0]
smpco2con = calc_dissgas_smpco2con(port_timestamp_sampleint,
L0_dissgas_sampleint,
gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table, sensor_depth)
#Convert depth (meters) to pressure (psi)
pressure = (sensor_depth * 0.099204 + 1) * 14.695
#estimated salinity == 35
PSU = salinity
K1T = np.exp((2.83655 - 2307.1266 / (t + 273.15) - 1.5529413 * np.log(t + 273.15) -
(0.20760841 * 4.0484 / (t + 273.15)) * np.sqrt(PSU) + 0.0846834 *
PSU - 0.00654208 * np.sqrt(PSU**3) + np.log(1 - 0.001005 * PSU)))
K2T = np.exp((-9.226508 - 3351.616 / (t + 273.15) - 0.2005743 * np.log(t + 273.15) -
(0.106901773 * 23.9722 / (t + 273.15)) * np.sqrt(PSU) + 0.1130822 *
PSU - 0.00846934 * np.sqrt(PSU**3) + np.log(1 - 0.001005 * PSU)))
r1 = (pressure * (1.758163 - 0.008763 * t - pressure * ((7.32 * 10**-6) -
(2.0845 * 10**-7 * t))) / ((t + 273.15) * 83.131))
r2 = (pressure * (1.09075 + 0.00151 * t + pressure * ((2.69 * 10**-6) -
(3.506 * 10**-7 * t))) / ((t + 273.15) * 83.131))
K1 = np.exp(r1) * K1T
K2 = np.exp(r2) * K2T
alpha = 1 + (K1 / (10**-ph)) + ((K1 * K2) / (10**-ph)**2)
totlgas_smpco2con = alpha * smpco2con
return totlgas_smpco2con
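#Illustrative note (not part of the DPS): the factor alpha above is the carbonate
#speciation correction alpha = 1 + K1/[H+] + K1*K2/[H+]**2, the inverse of the
#fraction of dissolved inorganic carbon present as CO2(aq). Multiplying the L1
#free-CO2 concentration by alpha therefore approximates the total dissolved
#inorganic carbon (CO2(aq) + HCO3- + CO3--) at the measured pH and temperature.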
def calc_timestamp_totlgas_smph2scon(port_timestamp_sampleint,
L0_dissgas_sampleint,
gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved hydrogen
sulfide in the sample water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint,
L0_dissgas_sampleint,
gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table,
calibration_table)
#the direct mode timestamp is the 15th element of the preprocess_array array
smp_direct_timestamp = preprocess_array[14]
return smp_direct_timestamp
def calc_timestamp_totlgas_smpco2con(port_timestamp_sampleint,
L0_dissgas_sampleint,
gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved carbon dioxide
in the sample water as measured by the MASSP instrument,
while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint,
L0_dissgas_sampleint,
gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table,
calibration_table)
#the direct mode timestamp is the 15th element of the preprocess_array array
smp_direct_timestamp = preprocess_array[14]
return smp_direct_timestamp
def calc_l2_totlgas_bkgh2scon(port_timestamp_bkgndint, L0_dissgas_bkgndint,
gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table,
l2_ph_calibration_table, sensor_depth, salinity):
'''
Below are the steps for calculating L2 TOTLGAS- SMPH2SCON and L2 TOTLGAS-
BKGH2SCON core data products from L1 DISSGAS-SMPH2SCON and
DISSGAS-BKGH2SCON, the L1 auxiliary data MSINLET-TEMP, the pressure at the
site P and the value S from the calibration table, and the higher auxiliary
data products MSWATER-SMPPHVAL and MSWATER-BKGPHVAL.
'''
ph_temp_array = calc_l2_mswater_bkgphval(port_timestamp_bkgndint,
L0_dissgas_bkgndint,
gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table,
l2_ph_calibration_table)
t = ph_temp_array[1]
ph = ph_temp_array[0]
bkgh2scon = calc_dissgas_bkgh2scon(port_timestamp_bkgndint,
L0_dissgas_bkgndint,
gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table, sensor_depth)
#Convert depth (meters) to pressure (psi)
pressure = (sensor_depth * 0.099204 + 1) * 14.695
#estimated salinity == 35
PSU = salinity
k1T = 10**(-19.83 - (930.8 / (t+273.15)) + (2.8 * np.log(t + 273.15)) -
(np.sqrt(PSU) * (-0.2391 + 35.685 / (t+273.15))) - (PSU * (0.0109 - (0.3776
/ (t+273.15)))))
r = ((11.07 + 0.009 * t + 0.000942 * t**2) * 0.0689475729 * pressure +
(-6.869 * 10**(-6) + 1.2835 * 10**(-7) * t) * pressure**2) / ((t+273.15) * 83.131)
k1 = np.exp(r) * k1T
beta = 1 + (k1 / 10**-ph)
totlgas_bkgh2scon = beta * bkgh2scon
return totlgas_bkgh2scon
def calc_l2_totlgas_bkgco2con(port_timestamp_bkgndint, L0_dissgas_bkgndint,
gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table,
l2_ph_calibration_table, sensor_depth, salinity):
'''
Below are the steps for calculating L2 TOTLGAS- SMPCO2CON and L2 TOTLGAS-
BKGCO2CON core data products from L1 DISSGAS-SMPCO2CON and
DISSGAS-BKGCO2CON, the L1 auxiliary data MSINLET-TEMP, the pressure at the
site P and the value S from the calibration table, and the higher auxiliary
data products MSWATER-SMPPHVAL and MSWATER-BKGPHVAL.
'''
ph_temp_array = calc_l2_mswater_bkgphval(port_timestamp_bkgndint,
L0_dissgas_bkgndint,
gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table,
l2_ph_calibration_table)
t = ph_temp_array[1]
ph = ph_temp_array[0]
bkgco2con = calc_dissgas_bkgco2con(port_timestamp_bkgndint,
L0_dissgas_bkgndint,
gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table, sensor_depth)
#Convert depth (meters) to pressure (psi)
pressure = (sensor_depth * 0.099204 + 1) * 14.695
#estimated salinity == 35
PSU = salinity
K1T = np.exp((2.83655 - 2307.1266 / (t + 273.15) - 1.5529413 * np.log(t + 273.15) -
(0.20760841 * 4.0484 / (t + 273.15)) * np.sqrt(PSU) + 0.0846834 *
PSU - 0.00654208 * np.sqrt(PSU**3) + np.log(1 - 0.001005 * PSU)))
K2T = np.exp((-9.226508 - 3351.616 / (t + 273.15) - 0.2005743 * np.log(t + 273.15) -
(0.106901773 * 23.9722 / (t + 273.15)) * np.sqrt(PSU) + 0.1130822 *
PSU - 0.00846934 * np.sqrt(PSU**3) + np.log(1 - 0.001005 * PSU)))
r1 = (pressure * (1.758163 - 0.008763 * t - pressure * ((7.32 * 10**-6) -
(2.0845 * 10**-7 * t))) / ((t + 273.15) * 83.131))
r2 = (pressure * (1.09075 + 0.00151 * t + pressure * ((2.69 * 10**-6) -
(3.506 * 10**-7 * t))) / ((t + 273.15) * 83.131))
K1 = np.exp(r1) * K1T
K2 = np.exp(r2) * K2T
alpha = 1 + (K1 / (10**-ph)) + ((K1 * K2) / (10**-ph)**2)
totlgas_bkgco2con = alpha * bkgco2con
return totlgas_bkgco2con
def calc_timestamp_totlgas_bkgh2scon(port_timestamp_bkgndint,
L0_dissgas_bkgndint,
gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved hydrogen sulfide
in the background water as measured by the MASSP instrument,
while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint,
L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table,
calibration_table)
#the direct mode timestamp is the 12th element of the preprocess_array array
bkg_direct_timestamp = preprocess_array[11]
return bkg_direct_timestamp
def calc_timestamp_totlgas_bkgco2con(port_timestamp_bkgndint,
L0_dissgas_bkgndint,
gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved carbon dioxide
in the background water as measured by the MASSP instrument,
while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint,
L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table,
calibration_table)
#the direct mode timestamp is the 12th element of the preprocess_array array
bkg_direct_timestamp = preprocess_array[11]
return bkg_direct_timestamp
#Block of wrapper functions for calculating the pH intensity auxiliary data products and associated timestamps
def calc_l2_mswater_smpphval(port_timestamp_sampleint, L0_dissgas_sampleint,
gas_mode_sampleint, port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table,
l2_ph_calibration_table):
'''
Below are the steps for processing the auxiliary products MSINLET-TEMP,
MSINLET-SMPPHINT and MSINLET-BKGPHINT into the higher level auxiliary
products MSWATER-SMPPHVAL and MSWATER-BKGPHVAL.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint,
L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table,
calibration_table)
#msinlet_temp is the 12th element of the preprocess_array array
msinlet_temp = preprocess_array[11]
#msinlet_smpphint is the 13th element of the preprocess_array array
msinlet_smpphint = preprocess_array[12]
A0 = l2_ph_calibration_table[0]
A1 = l2_ph_calibration_table[1]
A2 = l2_ph_calibration_table[2]
a0 = l2_ph_calibration_table[4]
a1 = l2_ph_calibration_table[3]
a2 = l2_ph_calibration_table[5]
pH = (A0 + (A1 * msinlet_temp) + (A2 * msinlet_temp**2)) * ((a2 * msinlet_smpphint**2) + (a1 * msinlet_smpphint) + a0 + 7)
if pH < 2 or pH > 12:
l2_msinlet_smpphint = -9999999.0
else:
l2_msinlet_smpphint = pH
return l2_msinlet_smpphint, msinlet_temp
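#Illustrative sketch (not part of the DPS): both MSWATER pH wrappers evaluate the same
#calibration polynomial, a temperature term (A0 + A1*T + A2*T**2) multiplied by a
#quadratic in the raw pH intensity, and then gate the result to the plausible 2-12
#range, substituting the fill value -9999999.0 otherwise. The hypothetical helper
#below isolates that final QC gate, assuming the same fill value.
def _example_ph_range_check(pH, fill_value=-9999999.0):
    '''
    Hypothetical helper, for illustration only: return pH unchanged if it falls
    within the accepted 2-12 range, otherwise return the fill value used by the
    MSWATER wrappers.
    '''
    if pH < 2 or pH > 12:
        return fill_value
    return pH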
def calc_l2_mswater_bkgphval(port_timestamp_bkgndint, L0_dissgas_bkgndint,
gas_mode_bkgndint, port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table,
l2_ph_calibration_table):
'''
Below are the steps for processing the auxiliary products MSINLET-TEMP,
MSINLET-SMPPHINT and MSINLET-BKGPHINT into the higher level auxiliary
products MSWATER-SMPPHVAL and MSWATER-BKGPHVAL.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint,
L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table,
calibration_table)
#msinlet_temp is the 9th element of the preprocess_array array
msinlet_temp = preprocess_array[8]
#msinlet_bkgphint is the 10th element of the preprocess_array array
msinlet_bkgphint = preprocess_array[9]
A0 = l2_ph_calibration_table[0]
A1 = l2_ph_calibration_table[1]
A2 = l2_ph_calibration_table[2]
a0 = l2_ph_calibration_table[4]
a1 = l2_ph_calibration_table[3]
a2 = l2_ph_calibration_table[5]
pH = (A0 + (A1 * msinlet_temp) + (A2 * msinlet_temp**2)) * ((a2 * msinlet_bkgphint**2) + (a1 * msinlet_bkgphint) + a0 + 7)
if pH < 2 or pH > 12:
l2_msinlet_bkgphint = -9999999.0
else:
l2_msinlet_bkgphint = pH
return l2_msinlet_bkgphint, msinlet_temp
def calc_msinlet_smpphint(port_timestamp_sampleint, L0_dissgas_sampleint,
gas_mode_sampleint, port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table):
'''
Sample pH intensity is output by a sensor onboard the MASSP instrument. It
is the pH signal intensity of the Sample Water at the time of
dissolved gas measurement.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint,
L0_dissgas_sampleint,
gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table,
calibration_table)
#msinlet_smpphint is the 13th element of the preprocess_array array
msinlet_smpphint = preprocess_array[12]
return msinlet_smpphint
def calc_msinlet_smpphint_timestamp(port_timestamp_sampleint,
L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table):
'''
This is a wrapper function to calculate the timestamp associated with the
MSINLET-SMPPHINT measurement, acquired while the MASSP instrument is in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint,
L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table,
calibration_table)
#the direct mode timestamp is the 15th element of the preprocess_array array
smp_direct_timestamp = preprocess_array[14]
return smp_direct_timestamp
def calc_msinlet_bkgphint(port_timestamp_bkgndint, L0_dissgas_bkgndint,
gas_mode_bkgndint, port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table):
'''
Background Water pH intensity is output by a sensor onboard the MASSP
instrument. It is the pH signal intensity of Background Water at
the time of dissolved gas measurement.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint,
L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table,
calibration_table)
#msinlet_bkgphint is the 10th element of the preprocess_array array
msinlet_bkgphint = preprocess_array[9]
return msinlet_bkgphint
def calc_msinlet_bkgphint_timestamp(port_timestamp_bkgndint,
L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table):
'''
This is a wrapper function to calculate the timestamp associated with the
MSINLET-BKGPHINT measurement, acquired while the MASSP instrument is in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint,
L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu,
ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table,
calibration_table)
#the nafion mode timestamp is the 11th element of the preprocess_array array
bkg_nafion_timestamp = preprocess_array[10]
return bkg_nafion_timestamp
def calc_msinlet_cal1phint(port_timestamp_calint01, L0_dissgas_calint01,
gas_mode_calint01, port_timestamp_calint01_mcu,
ph_meter_calint01_mcu, inlet_temp_calint01_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table):
'''
Calibration Solution 1 pH intensity is output by a sensor onboard the MASSP
instrument. It is the pH signal intensity of Calibration Solution 1 at the
time of dissolved gas measurement.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = Cal1PreProcess(port_timestamp_calint01,
L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu,
ph_meter_calint01_mcu,
inlet_temp_calint01_mcu, mass_table,
calibration_table)
#msinlet_cal1phint is the 5th element of the preprocess_array array
msinlet_cal1phint = preprocess_array[4]
return msinlet_cal1phint
def calc_msinlet_cal1phint_timestamp(port_timestamp_calint01,
L0_dissgas_calint01,
gas_mode_calint01,
port_timestamp_calint01_mcu,
ph_meter_calint01_mcu,
inlet_temp_calint01_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table):
'''
This is a wrapper function to calculate the timestamp associated with the
MSINLET-CA1PHINT measurement, acquired while the MASSP instrument is in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = Cal1PreProcess(port_timestamp_calint01,
L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu,
ph_meter_calint01_mcu,
inlet_temp_calint01_mcu, mass_table,
calibration_table)
#the nafion mode timestamp is the 6th element of the preprocess_array array
cal1_nafion_timestamp = preprocess_array[5]
return cal1_nafion_timestamp
def calc_msinlet_cal2phint(port_timestamp_calint02, L0_dissgas_calint02,
gas_mode_calint02, port_timestamp_calint02_mcu,
ph_meter_calint02_mcu, inlet_temp_calint02_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table):
'''
Calibration Solution 2 pH intensity is output by a sensor onboard the MASSP
instrument. It is the pH signal intensity of Calibration Solution 2 at the
time of dissolved gas measurement.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = Cal2PreProcess(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu,
inlet_temp_calint02_mcu, mass_table, calibration_table)
#msinlet_cal2phint is the 5th element of the preprocess_array array
msinlet_cal2phint = preprocess_array[4]
return msinlet_cal2phint
def calc_msinlet_cal2phint_timestamp(port_timestamp_calint02,
L0_dissgas_calint02,
gas_mode_calint02,
port_timestamp_calint02_mcu,
ph_meter_calint02_mcu,
inlet_temp_calint02_mcu,
massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu,
calibration_table):
'''
This is a wrapper function to calculate the timestamp associated with the
MSINLET-CA2PHINT measurement, acquired while the MASSP instrument is in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = Cal2PreProcess(port_timestamp_calint02,
L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu,
ph_meter_calint02_mcu,
inlet_temp_calint02_mcu, mass_table,
calibration_table)
#the direct mode timestamp is the 7th element of the preprocess_array array
cal2_direct_timestamp = preprocess_array[6]
return cal2_direct_timestamp
#Block of wrapper functions for calculating the nafion drier efficiency auxiliary data product and associated timestamp
def calc_smpnafeff(port_timestamp_sampleint, L0_dissgas_sampleint,
gas_mode_sampleint, port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table):
'''
The auxiliary data product Nafion Drier Efficiency (NAFEFF) is an indicator
of the drying efficiency of the nafion drier. The efficiency is represented
as the percentage of water signal in nafion mode compared to direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint,
L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table,
calibration_table)
#smpnafeff is the 10th element of the preprocess_array array
smpnafeff = preprocess_array[9]
return smpnafeff
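#Illustrative note (not part of the DPS): NAFEFF itself is computed upstream in
#SamplePreProcess. Following the description above, it expresses the water (m/z 18)
#signal remaining in Nafion mode as a percentage of the Direct-mode water signal,
#i.e. presumably something of the form
#
#    nafeff = 100.0 * water_signal_nafion / water_signal_direct
#
#built from the intermediary variables sample-mz18Naf and sample-mz18, so a lower
#value indicates that the drier removed more water.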
def calc_smpnafeff_timestamp(port_timestamp_sampleint, L0_dissgas_sampleint,
gas_mode_sampleint, port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass,
massp_rga_steps_per_amu, calibration_table):
'''
This is a wrapper function to calculate the timestamp associated with the
NAFEFF (Nafion Drier Efficiency) data product, acquired while the MASSP
instrument is in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass,
massp_rga_final_mass,
massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint,
L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu,
ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table,
calibration_table)
#the nafion mode timestamp is the 14th element of the preprocess_array array
smp_nafion_timestamp = preprocess_array[13]
return smp_nafion_timestamp
#Block of wrapper functions for calculating the L1 data products
def calc_dissgas_smpmethcon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
This is a wrapper function to calculate the in situ concentration (uM)
of dissolved methane in the sample water as measured by the MASSP
instrument, while in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz15 is the first element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[0]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 0
last_column = 4
#average inlet temperature (nafion mode) is the 11th element of the preprocess_array array
average_temperature = preprocess_array[10]
smpmethcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
smpmethcon = smpmethcon[0]
return smpmethcon
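#Illustrative note (not part of the DPS): every L1 wrapper in this block follows the
#same pattern as calc_dissgas_smpmethcon above: build the RGA mass table, run the
#appropriate *PreProcess subroutine, pick the intermediate m/z intensity and averaged
#inlet temperature for the gas of interest, and pass them to gas_concentration
#together with the four-column slice of the calibration table (DPS Table 1) that
#belongs to that gas. Only the preprocess-array indices, the calibration-table
#columns, and (for oxygen) the deconvolution variable change from wrapper to wrapper.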
def calc_dissgas_smpethcon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
This is a wrapper function to calculate the in situ concentration (uM)
of dissolved ethane in the sample water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz30 is the 5th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[4]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 4
last_column = 8
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smpethcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
smpethcon = smpethcon[0]
return smpethcon
def calc_dissgas_smph2con(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
This is a wrapper function to calculate the in situ concentration (uM)
of dissolved hydrogen in the sample water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz2 is the 3rd element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[2]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 8
last_column = 12
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smph2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
smph2con = smph2con[0]
return smph2con
def calc_dissgas_smparcon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved argon in the
sample water as measured by the MASSP instrument, while
in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz40 is the 8th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[7]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 12
last_column = 16
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smparcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
smparcon = smparcon[0]
return smparcon
def calc_dissgas_smph2scon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved hydrogen
sulfide in the sample water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz34 is the 7th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[6]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 16
last_column = 20
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smph2scon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
smph2scon = smph2scon[0]
return smph2scon
def calc_dissgas_smpo2con(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved oxygen
in the sample water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz32 is the 6th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[5]
#sample_mz34 is the 7th element of the preprocess_array array
deconvolution_variable = preprocess_array[6]
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 20
last_column = 24
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smpo2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
smpo2con = smpo2con[0]
return smpo2con
def calc_dissgas_smpco2con(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved carbon dioxide
in the sample water as measured by the MASSP instrument,
while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz44 is the 9th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[8]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 24
last_column = 28
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smpco2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
smpco2con = smpco2con[0]
return smpco2con
def calc_dissgas_bkgmethcon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved methane in the background
water as measured by the MASSP instrument, while in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#bckgnd_mz15 is the 2nd element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[1]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 0
last_column = 4
#average inlet temperature (nafion mode) is the 8th element of the preprocess_array array
average_temperature = preprocess_array[7]
bkgmethcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
bkgmethcon = bkgmethcon[0]
return bkgmethcon
def calc_dissgas_bkgethcon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved ethane in the background
water as measured by the MASSP instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#bckgnd_mz30 is the 3rd element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[2]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 4
last_column = 8
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgethcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
bkgethcon = bkgethcon[0]
return bkgethcon
def calc_dissgas_bkgh2con(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved hydrogen
in the background water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#bckgnd_mz2 is the 1st element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[0]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 8
last_column = 12
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgh2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
bkgh2con = bkgh2con[0]
return bkgh2con
def calc_dissgas_bkgarcon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved argon
in the background water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#bckgnd_mz40 is the 6th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[5]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 12
last_column = 16
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgarcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
bkgarcon = bkgarcon[0]
return bkgarcon
def calc_dissgas_bkgh2scon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved hydrogen sulfide
in the background water as measured by the MASSP instrument,
while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#bckgnd_mz34 is the 5th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[4]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 16
last_column = 20
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgh2scon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
bkgh2scon = bkgh2scon[0]
return bkgh2scon
def calc_dissgas_bkgo2con(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved oxygen
in the background water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#bckgnd_mz32 is the 4th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[3]
#bckgnd_mz34 is the 5th element of the preprocess_array array
deconvolution_variable = preprocess_array[4]
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 20
last_column = 24
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgo2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
bkgo2con = bkgo2con[0]
return bkgo2con
def calc_dissgas_bkgco2con(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved carbon dioxide
in the background water as measured by the MASSP instrument,
while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#bckgnd_mz44 is the 7th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[6]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 24
last_column = 28
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgco2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
bkgco2con = bkgco2con[0]
return bkgco2con
def calc_dissgas_cal1methcon(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu, inlet_temp_calint01_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved methane in the Calibration
Solution 1 water as measured by the MASSP instrument, while in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal1PreProcess(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu,
inlet_temp_calint01_mcu, mass_table, calibration_table)
#cal1_mz15 is the first element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[0]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 0
last_column = 4
#average inlet temperature (nafion mode) is the 3rd element of the preprocess_array array
average_temperature = preprocess_array[2]
cal1methcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
cal1methcon = cal1methcon[0]
return cal1methcon
def calc_dissgas_cal1co2con(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu, inlet_temp_calint01_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved carbon dioxide in the Calibration
Solution 1 water as measured by the MASSP instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal1PreProcess(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu,
inlet_temp_calint01_mcu, mass_table, calibration_table)
#cal1_mz44 is the 2nd element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[1]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 24
last_column = 28
#average inlet temperature (direct mode) is the 4th element of the preprocess_array array
average_temperature = preprocess_array[3]
cal1co2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
cal1co2con = cal1co2con[0]
return cal1co2con
def calc_dissgas_cal2methcon(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu, inlet_temp_calint02_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved methane in the Calibration
Solution 2 water as measured by the MASSP instrument, while in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal2PreProcess(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu,
inlet_temp_calint02_mcu, mass_table, calibration_table)
#Cal2_mz15 is the first element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[0]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 0
last_column = 4
#average inlet temperature (nafion mode) is the 3rd element of the preprocess_array array
average_temperature = preprocess_array[2]
cal2methcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
cal2methcon = cal2methcon[0]
return cal2methcon
def calc_dissgas_cal2co2con(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu, inlet_temp_calint02_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The in situ concentration (uM) of dissolved carbon dioxide in the Calibration
Solution 2 water as measured by the MASSP instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal2PreProcess(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu,
inlet_temp_calint02_mcu, mass_table, calibration_table)
#Cal2_mz44 is the 2nd element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[1]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 24
last_column = 28
#average inlet temperature (direct mode) is the 4th element of the preprocess_array array
average_temperature = preprocess_array[3]
cal2co2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
#the first element of the array is the gas conc., the second is the calrange
cal2co2con = cal2co2con[0]
return cal2co2con
#Block of wrapper functions for calculating the timestamps of the L1 data products
def calc_timestamp_smpmethcon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
This is a wrapper function to calculate the timestamp of the in situ concentration (uM)
of dissolved methane in the sample water as measured by the MASSP
instrument, while in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#the nafion mode timestamp is the 14th element of the preprocess_array array
smp_nafion_timestamp = preprocess_array[13]
return smp_nafion_timestamp
def calc_timestamp_smpethcon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
This is a wrapper function to calculate the timestamp of the in situ concentration (uM)
of dissolved ethane in the sample water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 15th element of the preprocess_array array
smp_direct_timestamp = preprocess_array[14]
return smp_direct_timestamp
def calc_timestamp_smph2con(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
This is a wrapper function to calculate the timestamp of the in situ concentration (uM)
of dissolved hydrogen in the sample water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 15th element of the preprocess_array array
smp_direct_timestamp = preprocess_array[14]
return smp_direct_timestamp
def calc_timestamp_smparcon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved argon in the
sample water as measured by the MASSP instrument, while
in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 15th element of the preprocess_array array
smp_direct_timestamp = preprocess_array[14]
return smp_direct_timestamp
def calc_timestamp_smph2scon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved hydrogen
sulfide in the sample water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 15th element of the preprocess_array array
smp_direct_timestamp = preprocess_array[14]
return smp_direct_timestamp
def calc_timestamp_smpo2con(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved oxygen
in the sample water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 15th element of the preprocess_array array
smp_direct_timestamp = preprocess_array[14]
return smp_direct_timestamp
def calc_timestamp_smpco2con(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved carbon dioxide
in the sample water as measured by the MASSP instrument,
while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 15th element of the preprocess_array array
smp_direct_timestamp = preprocess_array[14]
return smp_direct_timestamp
def calc_timestamp_bkgmethcon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved methane in the background
water as measured by the MASSP instrument, while in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#the nafion mode timestamp is the 11th element of the preprocess_array array
bkg_nafion_timestamp = preprocess_array[10]
return bkg_nafion_timestamp
def calc_timestamp_bkgethcon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved ethane in the background
water as measured by the MASSP instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 12th element of the preprocess_array array
bkg_direct_timestamp = preprocess_array[11]
return bkg_direct_timestamp
def calc_timestamp_bkgh2con(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved hydrogen
in the background water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 12th element of the preprocess_array array
bkg_direct_timestamp = preprocess_array[11]
return bkg_direct_timestamp
def calc_timestamp_bkgarcon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved argon
in the background water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 12th element of the preprocess_array array
bkg_direct_timestamp = preprocess_array[11]
return bkg_direct_timestamp
def calc_timestamp_bkgh2scon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved hydrogen sulfide
in the background water as measured by the MASSP instrument,
while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 12th element of the preprocess_array array
bkg_direct_timestamp = preprocess_array[11]
return bkg_direct_timestamp
def calc_timestamp_bkgo2con(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved oxygen
in the background water as measured by the MASSP
instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 12th element of the preprocess_array array
bkg_direct_timestamp = preprocess_array[11]
return bkg_direct_timestamp
def calc_timestamp_bkgco2con(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved carbon dioxide
in the background water as measured by the MASSP instrument,
while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 12th element of the preprocess_array array
bkg_direct_timestamp = preprocess_array[11]
return bkg_direct_timestamp
def calc_timestamp_cal1methcon(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu, inlet_temp_calint01_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved methane in the Calibration
Solution 1 water as measured by the MASSP instrument, while in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal1PreProcess(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu,
inlet_temp_calint01_mcu, mass_table, calibration_table)
#the nafion mode timestamp is the 6th element of the preprocess_array array
cal1_nafion_timestamp = preprocess_array[5]
return cal1_nafion_timestamp
def calc_timestamp_cal1co2con(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu, inlet_temp_calint01_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved carbon dioxide in the Calibration
Solution 1 water as measured by the MASSP instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal1PreProcess(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu,
inlet_temp_calint01_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 7th element of the preprocess_array array
cal1_direct_timestamp = preprocess_array[6]
return cal1_direct_timestamp
def calc_timestamp_cal2methcon(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu, inlet_temp_calint02_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved methane in the Calibration
Solution 2 water as measured by the MASSP instrument, while in Nafion mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal2PreProcess(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu,
inlet_temp_calint02_mcu, mass_table, calibration_table)
#the nafion mode timestamp is the 6th element of the preprocess_array array
cal2_nafion_timestamp = preprocess_array[5]
return cal2_nafion_timestamp
def calc_timestamp_cal2co2con(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu, inlet_temp_calint02_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu, calibration_table):
'''
The timestamp of the in situ concentration (uM) of dissolved carbon dioxide in the Calibration
Solution 2 water as measured by the MASSP instrument, while in Direct mode.
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal2PreProcess(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu,
inlet_temp_calint02_mcu, mass_table, calibration_table)
#the direct mode timestamp is the 7th element of the preprocess_array array
cal2_direct_timestamp = preprocess_array[6]
return cal2_direct_timestamp
#Block of wrapper functions for calculating the calibration ranges of the L1 data products
def calc_calrang_smpmethcon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
The auxiliary data product MASSP Calibration Range (CALRANG) is the
collection of parameters associated with the quality status for each gas
concentration. A value of 0 indicates that both the intensity and
temperature used are within the calibration range. A value of -1 indicates
that the intensity used was below the minimum of the calibration range. A
value of 1 indicates that the intensity was higher than the maximum of the
calibration range, but the temperature was within the calibration range. A
value of 2 indicates that the intensity was within the calibration range,
but that the temperature was above the calibration range. A value of 3
indicates that both the intensity and the temperature were above the
calibration range.
Quality status for the Methane concentration in the sample water
'''
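    #Worked example of the mapping above (hypothetical value): a returned calrang of 2
    #means the methane intensity was within the calibration range but the averaged inlet
    #temperature was above it, so the concentration should be used with caution.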
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz15 is the first element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[0]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 0
last_column = 4
#average inlet temperature (nafion mode) is the 11th element of the preprocess_array array
average_temperature = preprocess_array[10]
smpmethcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
smpmethcon = smpmethcon[1]
return smpmethcon
def calc_calrang_smpethcon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the Ethane concentration in the sample water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz30 is the 5th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[4]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 4
last_column = 8
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smpethcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
smpethcon = smpethcon[1]
return smpethcon
def calc_calrang_smph2con(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the Hydrogen concentration in the sample water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz2 is the 3rd element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[2]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 8
last_column = 12
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smph2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
smph2con = smph2con[1]
return smph2con
def calc_calrang_smparcon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the Argon concentration in the sample water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz40 is the 8th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[7]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 12
last_column = 16
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smparcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
smparcon = smparcon[1]
return smparcon
def calc_calrang_smph2scon(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the H2S concentration in the sample water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz34 is the 7th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[6]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 16
last_column = 20
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smph2scon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
smph2scon = smph2scon[1]
return smph2scon
def calc_calrang_smpo2con(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the oxygen concentration in the sample water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz32 is the 6th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[5]
#sample_mz34 is the 7th element of the preprocess_array array
deconvolution_variable = preprocess_array[6]
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 20
last_column = 24
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smpo2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
smpo2con = smpo2con[1]
return smpo2con
def calc_calrang_smpco2con(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the CO2 concentration in the sample water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu,
inlet_temp_sampleint_mcu, mass_table, calibration_table)
#sample_mz44 is the 9th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[8]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 24
last_column = 28
#average inlet temperature (direct mode) is the 12th element of the preprocess_array array
average_temperature = preprocess_array[11]
smpco2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
smpco2con = smpco2con[1]
return smpco2con
def calc_calrang_bkgmethcon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the Methane concentration in the background water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#sample_mz15 is the 2nd element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[1]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 0
last_column = 4
#average inlet temperature (nafion mode) is the 8th element of the preprocess_array array
average_temperature = preprocess_array[7]
bkgmethcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
bkgmethcon = bkgmethcon[1]
return bkgmethcon
def calc_calrang_bkgethcon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the Ethane concentration in the background water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#sample_mz30 is the 3rd element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[2]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 4
last_column = 8
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgethcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
bkgethcon = bkgethcon[1]
return bkgethcon
def calc_calrang_bkgh2con(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the Hydrogen concentration in the background water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#sample_mz2 is the 1st element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[0]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 8
last_column = 12
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgh2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
bkgh2con = bkgh2con[1]
return bkgh2con
def calc_calrang_bkgarcon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the Argon concentration in the background water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#sample_mz40 is the 6th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[5]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 12
last_column = 16
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgarcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
bkgarcon = bkgarcon[1]
return bkgarcon
def calc_calrang_bkgh2scon(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the H2S concentration in the background water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#sample_mz34 is the 5th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[4]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 16
last_column = 20
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgh2scon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
bkgh2scon = bkgh2scon[1]
return bkgh2scon
def calc_calrang_bkgo2con(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the oxygen concentration in the background water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#sample_mz32 is the 4th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[3]
#sample_mz34 is the 5th element of the preprocess_array array
deconvolution_variable = preprocess_array[4]
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 20
last_column = 24
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgo2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
bkgo2con = bkgo2con[1]
return bkgo2con
def calc_calrang_bkgco2con(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the CO2 concentration in the background water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu,
inlet_temp_bkgndint_mcu, mass_table, calibration_table)
#sample_mz44 is the 7th element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[6]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 24
last_column = 28
#average inlet temperature (direct mode) is the 9th element of the preprocess_array array
average_temperature = preprocess_array[8]
bkgco2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
bkgco2con = bkgco2con[1]
return bkgco2con
def calc_calrang_cal1methcon(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu, inlet_temp_calint01_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the Methane concentration in the calibration fluid 1 water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal1PreProcess(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu,
inlet_temp_calint01_mcu, mass_table, calibration_table)
#cal1_mz15 is the first element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[0]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 0
last_column = 4
#average inlet temperature (nafion mode) is the 3rd element of the preprocess_array array
average_temperature = preprocess_array[2]
ca1methcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
ca1methcon = ca1methcon[1]
return ca1methcon
def calc_calrang_cal1co2con(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu, inlet_temp_calint01_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the CO2 concentration in the calibration fluid 1 water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal1PreProcess(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu,
inlet_temp_calint01_mcu, mass_table, calibration_table)
#cal1_mz44 is the 2nd element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[1]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 24
last_column = 28
#average inlet temperature (direct mode) is the 4th element of the preprocess_array array
average_temperature = preprocess_array[3]
ca1co2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
ca1co2con = ca1co2con[1]
return ca1co2con
def calc_calrang_cal2methcon(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu, inlet_temp_calint02_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the Methane concentration in the calibration fluid 2 water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal2PreProcess(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu,
inlet_temp_calint02_mcu, mass_table, calibration_table)
#Cal2_mz15 is the first element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[0]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 0
last_column = 4
#average inlet temperature (nafion mode) is the 3rd element of the preprocess_array array
average_temperature = preprocess_array[2]
ca2methcon = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
ca2methcon = ca2methcon[1]
return ca2methcon
def calc_calrang_cal2co2con(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu, inlet_temp_calint02_mcu,
massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu,
calibration_table, sensor_depth):
'''
Quality status for the CO2 concentration in the calibration fluid 2 water
'''
mass_table = rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu)
preprocess_array = Cal2PreProcess(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu,
inlet_temp_calint02_mcu, mass_table, calibration_table)
#Cal2_mz44 is the 2nd element of the preprocess_array array
intermediate_mass_ratio = preprocess_array[1]
deconvolution_variable = 0
#first and last column for this particular gas in the calibration table
#This information is in table 1 of the DPS
first_column = 24
last_column = 28
#average inlet temperature (direct mode) is the 4th element of the preprocess_array array
average_temperature = preprocess_array[3]
ca2co2con = gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature)
    #the first element of the array is the gas conc., the second is the calrange
ca2co2con = ca2co2con[1]
return ca2co2con
#Block of subfunctions called by the above wrapper functions that calculate the L1 and auxiliary data products
def gas_concentration(intermediate_mass_ratio, deconvolution_variable, calibration_table,
first_column, last_column, sensor_depth, average_temperature):
'''
    This sub-routine takes in a column range from Table 1 (referred to as c1, c2, c3, c4),
    a corrected intensity (referred to as x, from the Deconvolution subroutine), an averaged
    temperature (referred to as T, see Table 1), and the pressure P of the sampling site, and
    calculates the final concentration used for the L1 DISSGAS data products. This subroutine
    also assigns a value to the corresponding CALRANG parameter (see Table 1) identifying
    the quality of the concentration value (indicating whether it is out of the calibration
    range for concentration and/or temperature). The subroutine also uses a temporary
    variable, tempCalRange, used to compute the final value of CALRANG.
    The following vars are hard coded by each of the wrapper functions:
    deconvolution_variable, calibration_table, first_column, last_column
'''
    #Convert depth (meters) to pressure (psi)
pressure = (sensor_depth * 0.099204 + 1) * 14.695
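    #Worked example (hypothetical depth): at sensor_depth = 1500 m,
    #pressure = (1500 * 0.099204 + 1) * 14.695 ~= 2201 psi.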
#extract the four columns of the cal table that I need for a particular gas.
calibration_table = calibration_table[:, first_column:last_column]
    #Flag indicating whether a second calibration temperature column (ct2) is needed
    #for interpolation; default to a single-column calculation.
    concT2_flag = 0
    #Check to see if one of the 4 calibration temperatures == the averaged inlet temperature
    ind = np.where(calibration_table[0, :] == average_temperature)[0]
    if np.size(ind) == 1:
        ct1 = ind[0]
        #only one calibration column applies, so point ct2 at the same column for the
        #range checks below
        ct2 = ct1
        tempCalRange = 1
    #Check to see if the averaged inlet temperature is greater than the highest calibration temperature
    elif average_temperature >= calibration_table[0, 3]:
        ct1 = 3
        ct2 = ct1
        tempCalRange = 2
    #Otherwise figure out which two columns in the calibration table are needed.
    else:
        #find first column
        ct1 = np.where(calibration_table[0, :] < average_temperature)[0][-1]
        #find second column
        ct2 = np.where(calibration_table[0, :] > average_temperature)[0][0]
        concT2_flag = 1
        tempCalRange = 0
corrected_intensity = deconvolution_correction(intermediate_mass_ratio, deconvolution_variable, calibration_table)
#Check to see if the corrected intensity falls within the calibration values
#Minimum values, row 4 in the cal table
if corrected_intensity < calibration_table[3, ct1] or corrected_intensity < calibration_table[3, ct2]:
calrange = -1
#Maximum values, row 5 in the cal table
elif corrected_intensity > calibration_table[4, ct1] or corrected_intensity > calibration_table[4, ct2]:
calrange = tempCalRange + 1
else:
calrange = tempCalRange
#P0 is row 6 (with row 0 being the first row) in the cal table
if corrected_intensity < calibration_table[5, ct1]:
alpha = calibration_table[6, ct1] + (calibration_table[7, ct1] * pressure) + (calibration_table[8, ct1] * pressure**2) + (calibration_table[9, ct1] * pressure**3)
beta = calibration_table[14, ct1] + (calibration_table[15, ct1] * pressure) + (calibration_table[16, ct1] * pressure**2) + (calibration_table[17, ct1] * pressure**3)
delta = calibration_table[22, ct1] + (calibration_table[23, ct1] * pressure) + (calibration_table[24, ct1] * pressure**2) + (calibration_table[25, ct1] * pressure**3)
gamma = calibration_table[28, ct1] + (calibration_table[29, ct1] * pressure) + (calibration_table[30, ct1] * pressure**2) + (calibration_table[31, ct1] * pressure**3)
zeta = calibration_table[36, ct1] + (calibration_table[37, ct1] * pressure) + (calibration_table[38, ct1] * pressure**2) + (calibration_table[39, ct1] * pressure**3)
elif corrected_intensity >= calibration_table[5, ct1]:
alpha = calibration_table[10, ct1] + (calibration_table[11, ct1] * pressure) + (calibration_table[12, ct1] * pressure**2) + (calibration_table[13, ct1] * pressure**3)
beta = calibration_table[18, ct1] + (calibration_table[19, ct1] * pressure) + (calibration_table[20, ct1] * pressure**2) + (calibration_table[21, ct1] * pressure**3)
        delta = calibration_table[26, ct1] * np.exp(calibration_table[27, ct1] * pressure)
gamma = calibration_table[32, ct1] + (calibration_table[33, ct1] * pressure) + (calibration_table[34, ct1] * pressure**2) + (calibration_table[35, ct1] * pressure**3)
zeta = calibration_table[40, ct1] + (calibration_table[41, ct1] * pressure) + (calibration_table[42, ct1] * pressure**2) + (calibration_table[43, ct1] * pressure**3)
#Calculate concT1
concT1 = (alpha * corrected_intensity**2) + (beta * corrected_intensity) + (delta * np.exp(zeta * corrected_intensity)) + gamma
if concT2_flag == 1:
if corrected_intensity < calibration_table[5, ct2]:
alpha = calibration_table[6, ct2] + (calibration_table[7, ct2] * pressure) + (calibration_table[8, ct2] * pressure**2) + (calibration_table[9, ct2] * pressure**3)
beta = calibration_table[14, ct2] + (calibration_table[15, ct2] * pressure) + (calibration_table[16, ct2] * pressure**2) + (calibration_table[17, ct2] * pressure**3)
delta = calibration_table[22, ct2] + (calibration_table[23, ct2] * pressure) + (calibration_table[24, ct2] * pressure**2) + (calibration_table[25, ct2] * pressure**3)
gamma = calibration_table[28, ct2] + (calibration_table[29, ct2] * pressure) + (calibration_table[30, ct2] * pressure**2) + (calibration_table[31, ct2] * pressure**3)
zeta = calibration_table[36, ct2] + (calibration_table[37, ct2] * pressure) + (calibration_table[38, ct2] * pressure**2) + (calibration_table[39, ct2] * pressure**3)
elif corrected_intensity >= calibration_table[5, ct2]:
alpha = calibration_table[10, ct2] + (calibration_table[11, ct2] * pressure) + (calibration_table[12, ct2] * pressure**2) + (calibration_table[13, ct2] * pressure**3)
beta = calibration_table[18, ct2] + (calibration_table[19, ct2] * pressure) + (calibration_table[20, ct2] * pressure**2) + (calibration_table[21, ct2] * pressure**3)
            delta = calibration_table[26, ct2] * np.exp(calibration_table[27, ct2] * pressure)
gamma = calibration_table[32, ct2] + (calibration_table[33, ct2] * pressure) + (calibration_table[34, ct2] * pressure**2) + (calibration_table[35, ct2] * pressure**3)
zeta = calibration_table[40, ct2] + (calibration_table[41, ct2] * pressure) + (calibration_table[42, ct2] * pressure**2) + (calibration_table[43, ct2] * pressure**3)
#Calculate concT2
concT2 = (alpha * corrected_intensity**2) + (beta * corrected_intensity) + (delta * np.exp(zeta * corrected_intensity)) + gamma
#Calculate concT
concT = concT1 + ((concT2 - concT1) * (average_temperature - calibration_table[0, ct1])) / (calibration_table[0, ct2] - calibration_table[0, ct1])
else:
#Calculate concT
concT = concT1
if calrange == -1:
final_conc = 0
else:
final_conc = calibration_table[44, ct1] * (concT - calibration_table[45, ct1])
return final_conc, calrange
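#Illustrative sketch of the two-temperature interpolation above (hypothetical numbers):
#if the bracketing calibration temperatures are T1 = 2 deg C (concT1 = 10 uM) and
#T2 = 10 deg C (concT2 = 18 uM), then at average_temperature = 4 deg C,
#concT = 10 + (18 - 10) * (4 - 2) / (10 - 2) = 12 uM. The final concentration then
#applies the row-44 scale factor and row-45 offset from the calibration table.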
def average_mz(mz, data_in, mass_table, window):
'''
This subroutine takes in a mass-to-charge ratio mz, a subset of n scans
and the mass_table and returns an intermediate mz (mass-to-charge) ratio.
'''
#find mz +/- window in the mass_table. The window value comes from the L1 Cal Table
mz_ind = np.where((mass_table >= mz - window) & (mass_table <= mz + window))
#subset the data_in array so that we are just dealing with the mz values
#within the time period of interest
temp_array = np.array(data_in[:, mz_ind])
temp_array = np.squeeze(temp_array)
#sort the array so that I can find the median of the three highest
#values for each scan
temp_array = np.sort(temp_array)
#grab the median values
median_array = temp_array[:, -2]
#find and replace any negative values with zero
median_ind = np.where(median_array < 0)
median_array[median_ind] = 0
#calculate the mean of the median values
intermediate_mass_ratio = np.nanmean(median_array)
return intermediate_mass_ratio
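#Illustrative sketch (hypothetical intensities): if a single scan has values
#[2e-12, 9e-12, 5e-12] inside the mz window, sorting gives [2e-12, 5e-12, 9e-12]
#and the [:, -2] selection picks 5e-12, the median of the three highest values;
#averaging that median across all scans in the subset yields the returned
#intermediate_mass_ratio.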
def deconvolution_correction(intermediate_mass_ratio, deconvolution_variable, calibration_table):
'''
This sub-routine takes in a main variable (intermediate_mass_ratio: see DPS Table 1), a
second variable (deconvolution_variable: see DPS Table 1), and a calibration lookup
table (DPS Table 2) and calculates a corrected intensity.
'''
#Equ 4 on page 13 of the DPS
corrected_intensity = intermediate_mass_ratio - (calibration_table[2, 0] * deconvolution_variable) - calibration_table[1, 0]
return corrected_intensity
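#Worked example of Equ 4 (hypothetical calibration values): with
#intermediate_mass_ratio = 5.00e-12, deconvolution_variable = 1.00e-12,
#calibration_table[2, 0] = 0.04 and calibration_table[1, 0] = 2.00e-13,
#corrected_intensity = 5.00e-12 - (0.04 * 1.00e-12) - 2.00e-13 = 4.76e-12.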
def rga_status_process(massp_rga_initial_mass, massp_rga_final_mass, massp_rga_steps_per_amu):
'''
This subroutine takes in the values of rga_final_mass, rga_initial_mass, and
rga_steps_per_amu, calculates the value for Tnb (Total number of values) and
returns a table of the masses.
'''
    Tnb = int(((massp_rga_final_mass - massp_rga_initial_mass) * massp_rga_steps_per_amu) + 1)
mass_table = np.ones(Tnb)
mass_table[0] = massp_rga_initial_mass
for x in range(1, Tnb):
        mass_table[x] = mass_table[x-1] + (1 / float(massp_rga_steps_per_amu))
mass_table = np.around(mass_table, decimals=1)
return mass_table
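#Illustrative sketch (hypothetical RGA settings): with massp_rga_initial_mass = 18,
#massp_rga_final_mass = 75 and massp_rga_steps_per_amu = 10, Tnb = (75 - 18) * 10 + 1 = 571
#and the returned mass_table runs 18.0, 18.1, ..., 75.0.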
def SamplePreProcess(port_timestamp_sampleint, L0_dissgas_sampleint, gas_mode_sampleint,
port_timestamp_sampleint_mcu, ph_meter_sampleint_mcu, inlet_temp_sampleint_mcu,
mass_table, calibration_table):
'''
    This subroutine takes in L0 DISSGAS-SAMPLEINT and produces the
    intermediary variables sample-mz2, sample-mz15, sample-mz18,
    sample-mz18Naf, sample-mz30, sample-mz32, sample-mz34, sample-mz40,
    sample-mz44, sample-Tnaf and sample-Tdir, as well as the NAFEFF
    and MSINLET-SMPPHINT AUX data products. This subroutine groups
the scans into two subsets corresponding to nafion and direct
mode, and then extracts the mass data needed for L1 computations.
Definitions:
port_timestamp = time stamps associated with L0_dissgas_sampleint,
gas_mode and sample_mode.
L0_dissgas_sampleint = mass spectral data set for sample fluid
    (array, 10^-16 ampere).
gas_mode = The auxiliary data product Gas Measurement Mode
(GASMODE) indicates the operating mode of the MASSP
instrument and can have integer values of 0 and 1 for Direct
and Nafion modes, respectively, or value of -1 if the instrument
is in another operating mode.
    inlet_temp = Sample Temperature (deg C) is output by a sensor onboard the MASSP instrument.
It is the temperature of the Sample Water at the time of dissolved gas measurement.
The value is set to -9999 when the instrument is not sampling.
ph_meter = Sample pH intensity is output by a sensor onboard the MASSP instrument.
It is the pH signal intensity (no unit) of the Sample Water at the time of dissolved
gas measurement.
mz = Mass to charge ratio
sample-mz2 = Intensity returned by the averaging subroutine for mz 2 within DISSGAS-SAMPLEINT
sample-mz15 = Intensity returned by the averaging subroutine for mz 15 within DISSGAS-SAMPLEINT
sample-mz18Naf = Intensity returned by the averaging subroutine for mz 18 in nafion mode within DISSGAS-SAMPLEINT
sample-mz18 = Intensity returned by the averaging subroutine for mz 18 within DISSGAS-SAMPLEINT
sample-mz30 = Intensity returned by the averaging subroutine for mz 30 within DISSGAS-SAMPLEINT
sample-mz32 = Intensity returned by the averaging subroutine for mz 32 within DISSGAS-SAMPLEINT
sample-mz34 = Intensity returned by the averaging subroutine for mz 34 within DISSGAS-SAMPLEINT
sample-mz40 = Intensity returned by the averaging subroutine for mz 40 within DISSGAS-SAMPLEINT
sample-mz44 = Intensity returned by the averaging subroutine for mz 44 within DISSGAS-SAMPLEINT
sample-Tdir = Averaged temperature in Sample fluid direct mode
sample-Tnaf = Averaged temperature in Sample fluid nafion mode
nafeff = The auxiliary data product Nafion Drier Efficiency (NAFEFF)
is an indicator of the drying efficiency of the nafion drier. The
efficiency is represented as the percentage of water signal in nafion
mode compared to direct mode.
'''
#replace bad data with nans
inlet_temp_sampleint_mcu[inlet_temp_sampleint_mcu == -127] = np.nan
inlet_temp_sampleint_mcu[inlet_temp_sampleint_mcu == 85] = np.nan
#find gas_mode_sampleint == 0 (direct mode)
ind_direct = np.where(gas_mode_sampleint == 0)[0]
Tchange = port_timestamp_sampleint_mcu[ind_direct[0]]
Tlast = port_timestamp_sampleint_mcu[ind_direct[-1]]
#find gas_mode_sampleint == 1 (nafion mode)
ind_nafion = np.where(gas_mode_sampleint == 1)[0]
TlastScanNafion = port_timestamp_sampleint_mcu[ind_nafion[-1]]
#ID timestamp closest to TlastScanNafion - 180
idx = (np.abs(port_timestamp_sampleint-(TlastScanNafion - 180))).argmin()
#subset the data collected in nafion mode
nafion_samples_ind = np.where((port_timestamp_sampleint >= port_timestamp_sampleint[idx]) & (port_timestamp_sampleint <= TlastScanNafion))
nafion_samples = np.squeeze(np.array(L0_dissgas_sampleint[nafion_samples_ind, :]))
#DPS says to exclude the last scan at TlastScanNafion. This subsetted array then gets fed into the ave. routine.
nafion_samples = nafion_samples[:-1, ]
#calculate nafion mode timestamp
nafion_mode_timestamp = np.squeeze(np.array(port_timestamp_sampleint[nafion_samples_ind]))
#DPS says to exclude the last scan at TlastScanNafion.
nafion_mode_timestamp = np.around(np.nanmean(nafion_mode_timestamp[:-1, ]))
mass_charge_ratio = 15
window = round(calibration_table[-1, 0], 1)
sample_mz15 = average_mz(mass_charge_ratio, nafion_samples, mass_table, window)
mass_charge_ratio = 18
    #not sure that this window of 0.5 is OK, but the 18 mz window is not specified in the cal table
window = round(calibration_table[-1, 8], 1)
sample_mz18naf = average_mz(mass_charge_ratio, nafion_samples, mass_table, window)
#average MSINLET-TEMP for nafion time period
nafion_samples_ind = np.squeeze(np.where((port_timestamp_sampleint_mcu >= port_timestamp_sampleint[idx]) & (port_timestamp_sampleint_mcu <= TlastScanNafion)))
sample_Tnaf = np.nanmean(inlet_temp_sampleint_mcu[nafion_samples_ind[:-1]])
#ID timestamp closest to Tlast - 180
idx = (np.abs(port_timestamp_sampleint-(Tlast - 180))).argmin()
#subset the data collected in direct mode
direct_samples_ind = np.where((port_timestamp_sampleint >= port_timestamp_sampleint[idx]) & (port_timestamp_sampleint <= Tlast))
direct_samples = np.squeeze(np.array(L0_dissgas_sampleint[direct_samples_ind, :]))
#calculate direct mode timestamp
direct_mode_timestamp = np.array(port_timestamp_sampleint[direct_samples_ind])
direct_mode_timestamp = np.around(np.nanmean(np.squeeze(direct_mode_timestamp)))
mass_charge_ratio = 2
window = round(calibration_table[-1, 8], 1)
sample_mz2 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 18
    #not sure that this window is right, but the 18 mz window is not specified in the cal table
window = round(calibration_table[-1, 8], 1)
sample_mz18 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 30
window = round(calibration_table[-1, 4], 1)
sample_mz30 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 32
window = round(calibration_table[-1, 20], 1)
sample_mz32 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 34
window = round(calibration_table[-1, 16], 1)
sample_mz34 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 40
window = round(calibration_table[-1, 12], 1)
sample_mz40 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 44
window = round(calibration_table[-1, 24], 1)
sample_mz44 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
#average MSINLET-TEMP for direct time period here, call it sample-Tdir
direct_samples_ind = np.where((port_timestamp_sampleint_mcu >= port_timestamp_sampleint[idx]) & (port_timestamp_sampleint_mcu <= Tlast))
sample_Tdir = np.nanmean(inlet_temp_sampleint_mcu[direct_samples_ind])
#average ph_meter_value for time period Tlast-1min:Tlast, call it msinlet_smpphint
direct_samples_ind = np.where((port_timestamp_sampleint_mcu >= Tlast - 60) & (port_timestamp_sampleint_mcu <= Tlast))
msinlet_smpphint = np.absolute(np.nanmean(ph_meter_sampleint_mcu[direct_samples_ind]))
#Calculate NAFEFF, which is an indicator of the drying efficiency of the nafion drier
    nafeff = int(100 * (sample_mz18naf / sample_mz18))
return (sample_mz15, sample_mz18naf, sample_mz2, sample_mz18, sample_mz30,
sample_mz32, sample_mz34, sample_mz40, sample_mz44, nafeff, sample_Tnaf,
sample_Tdir, msinlet_smpphint, nafion_mode_timestamp, direct_mode_timestamp)
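#Index map of the SamplePreProcess return tuple, as used by the wrapper functions above:
#0 sample_mz15, 1 sample_mz18naf, 2 sample_mz2, 3 sample_mz18, 4 sample_mz30,
#5 sample_mz32, 6 sample_mz34, 7 sample_mz40, 8 sample_mz44, 9 nafeff,
#10 sample_Tnaf, 11 sample_Tdir, 12 msinlet_smpphint, 13 nafion_mode_timestamp,
#14 direct_mode_timestamp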
def BackgroundPreProcess(port_timestamp_bkgndint, L0_dissgas_bkgndint, gas_mode_bkgndint,
port_timestamp_bkgndint_mcu, ph_meter_bkgndint_mcu, inlet_temp_bkgndint_mcu,
mass_table, calibration_table):
'''
    This subroutine takes in L0 BKGNDINT and produces the intermediary
    variables bckgnd-mz2, bckgnd-mz15, bckgnd-mz30, bckgnd-mz32,
    bckgnd-mz34, bckgnd-mz40, bckgnd-mz44, bckgnd-Tnaf and bckgnd-Tdir,
    as well as the MSINLET-BKGPHINT AUX data product. This subroutine groups
the scans into two subsets corresponding to nafion and direct
mode, and then extracts the mass data needed for L1 computations.
'''
#replace bad data with nans
inlet_temp_bkgndint_mcu[inlet_temp_bkgndint_mcu == -127] = np.nan
inlet_temp_bkgndint_mcu[inlet_temp_bkgndint_mcu == 85] = np.nan
#find gas_mode_bkgndint == 0 (direct mode)
ind_direct = np.where(gas_mode_bkgndint == 0)[0]
Tchange = port_timestamp_bkgndint_mcu[ind_direct[0]]
Tlast = port_timestamp_bkgndint_mcu[ind_direct[-1]]
#ID timestamp closest to Tlast - 180
idx = (np.abs(port_timestamp_bkgndint-(Tlast - 180))).argmin()
#subset the data collected in direct mode
direct_samples_ind = np.where((port_timestamp_bkgndint >= port_timestamp_bkgndint[idx]) & (port_timestamp_bkgndint <= Tlast))
direct_samples = np.squeeze(np.array(L0_dissgas_bkgndint[direct_samples_ind, :]))
#DPS says to exclude the last scan at TlastScanDirect. This subsetted array then gets fed into the ave. routine.
direct_samples = direct_samples[:-1, ]
#calculate direct mode timestamp
direct_mode_timestamp = np.squeeze(np.array(port_timestamp_bkgndint[direct_samples_ind]))
#DPS says to exclude the last scan at TlastScanDirect.
direct_mode_timestamp = np.around(np.nanmean(direct_mode_timestamp[:-1, ]))
mass_charge_ratio = 2
window = round(calibration_table[-1, 8], 1)
bckgnd_mz2 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 30
window = round(calibration_table[-1, 4], 1)
bckgnd_mz30 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 32
window = round(calibration_table[-1, 20], 1)
bckgnd_mz32 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 34
window = round(calibration_table[-1, 16], 1)
bckgnd_mz34 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 40
window = round(calibration_table[-1, 12], 1)
bckgnd_mz40 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
mass_charge_ratio = 44
window = round(calibration_table[-1, 24], 1)
bckgnd_mz44 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
#average MSINLET-TEMP for direct time period here, call it bckgnd-Tdir
direct_samples_ind = np.squeeze(np.where((port_timestamp_bkgndint_mcu >= port_timestamp_bkgndint[idx]) & (port_timestamp_bkgndint_mcu <= Tlast)))
bckgnd_Tdir = np.nanmean(inlet_temp_bkgndint_mcu[direct_samples_ind[:-1]])
#find gas_mode_bkgndint == 1 (nafion mode)
ind_nafion = np.where(gas_mode_bkgndint == 1)[0]
TlastScanNafion = port_timestamp_bkgndint_mcu[ind_nafion[-1]]
#ID timestamp closest to TlastScanNafion - 180
idx = (np.abs(port_timestamp_bkgndint-(TlastScanNafion - 180))).argmin()
#subset the data collected in nafion mode
nafion_samples_ind = np.where((port_timestamp_bkgndint >= port_timestamp_bkgndint[idx]) & (port_timestamp_bkgndint <= TlastScanNafion))
nafion_samples = np.squeeze(np.array(L0_dissgas_bkgndint[nafion_samples_ind, :]))
#calculate nafion mode timestamp
nafion_mode_timestamp = np.array(port_timestamp_bkgndint[nafion_samples_ind])
nafion_mode_timestamp = np.around(np.nanmean(np.squeeze(nafion_mode_timestamp)))
mass_charge_ratio = 15
window = round(calibration_table[-1, 0], 1)
bckgnd_mz15 = average_mz(mass_charge_ratio, nafion_samples, mass_table, window)
#average MSINLET-TEMP for nafion time period here, call it bckgnd-Tnaf
nafion_samples_ind = np.where((port_timestamp_bkgndint_mcu >= port_timestamp_bkgndint[idx]) & (port_timestamp_bkgndint_mcu <= TlastScanNafion))
bckgnd_Tnaf = np.nanmean(inlet_temp_bkgndint_mcu[nafion_samples_ind])
#average ph_meter_value for time period TlastScanNafion-1min:TlastScanNafion, call it msinlet_bkgphint
nafion_samples_ind = np.where((port_timestamp_bkgndint_mcu >= TlastScanNafion - 60) & (port_timestamp_bkgndint_mcu <= TlastScanNafion))
msinlet_bkgphint = np.absolute(np.nanmean(ph_meter_bkgndint_mcu[nafion_samples_ind]))
return (bckgnd_mz2, bckgnd_mz15, bckgnd_mz30, bckgnd_mz32, bckgnd_mz34,
bckgnd_mz40, bckgnd_mz44, bckgnd_Tnaf, bckgnd_Tdir, msinlet_bkgphint,
nafion_mode_timestamp, direct_mode_timestamp)
def Cal1PreProcess(port_timestamp_calint01, L0_dissgas_calint01, gas_mode_calint01,
port_timestamp_calint01_mcu, ph_meter_calint01_mcu,
inlet_temp_calint01_mcu, mass_table, calibration_table):
'''
This subroutine takes in L0 DISSGAS-CALINT01 and produces
intermediary variables cal1-mz44, cal1-mz15, cal1-Tnaf,
cal1-Tdir as well as MSINLET-CA1PHINT AUX data product.
This subroutine groups the scans into two subsets
corresponding to nafion and direct mode, and then extracts
the mass data needed for L1 computations. This subroutine
    is very similar to the BackgroundPreProcess subroutine,
    with just different intermediary variable assignments.
'''
#replace bad data with nans
inlet_temp_calint01_mcu[inlet_temp_calint01_mcu == -127] = np.nan
inlet_temp_calint01_mcu[inlet_temp_calint01_mcu == 85] = np.nan
#find gas_mode_calint01 == 0 (direct mode)
ind_direct = np.where(gas_mode_calint01 == 0)[0]
Tchange = port_timestamp_calint01_mcu[ind_direct[0]]
Tlast = port_timestamp_calint01_mcu[ind_direct[-1]]
#ID timestamp closest to Tlast - 60
idx = (np.abs(port_timestamp_calint01-(Tlast - 60))).argmin()
#subset the data collected in direct mode
direct_samples_ind = np.where((port_timestamp_calint01 >= port_timestamp_calint01[idx]) & (port_timestamp_calint01 <= Tlast))
direct_samples = np.squeeze(np.array(L0_dissgas_calint01[direct_samples_ind, :]))
#DPS says to exclude the last scan at TlastScanDirect. This subsetted array then gets fed into the ave. routine.
direct_samples = direct_samples[:-1, ]
#calculate direct mode timestamp
direct_mode_timestamp = np.squeeze(np.array(port_timestamp_calint01[direct_samples_ind]))
#DPS says to exclude the last scan at TlastScanDirect.
direct_mode_timestamp = np.around(np.nanmean(direct_mode_timestamp[:-1, ]))
mass_charge_ratio = 44
window = round(calibration_table[-1, 24], 1)
cal1_mz44 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
#average MSINLET-TEMP for direct time period here, call it cal1-Tdir
direct_samples_ind = np.squeeze(np.where((port_timestamp_calint01_mcu >= port_timestamp_calint01[idx]) & (port_timestamp_calint01_mcu <= Tlast)))
cal1_Tdir = np.nanmean(inlet_temp_calint01_mcu[direct_samples_ind[:-1]])
#find gas_mode_calint01 == 1 (nafion mode)
ind_nafion = np.where(gas_mode_calint01 == 1)[0]
TlastScanNafion = port_timestamp_calint01_mcu[ind_nafion[-1]]
#ID timestamp closest to TlastScanNafion - 60
idx = (np.abs(port_timestamp_calint01-(TlastScanNafion - 60))).argmin()
#subset the data collected in nafion mode
nafion_samples_ind = np.where((port_timestamp_calint01 >= port_timestamp_calint01[idx]) & (port_timestamp_calint01 <= TlastScanNafion))
nafion_samples = np.squeeze(np.array(L0_dissgas_calint01[nafion_samples_ind, :]))
#calculate nafion mode timestamp
nafion_mode_timestamp = np.array(port_timestamp_calint01[nafion_samples_ind])
nafion_mode_timestamp = np.around(np.nanmean(np.squeeze(nafion_mode_timestamp)))
mass_charge_ratio = 15
window = round(calibration_table[-1, 0], 1)
cal1_mz15 = average_mz(mass_charge_ratio, nafion_samples, mass_table, window)
#average MSINLET-TEMP for nafion time period here, call it cal1-Tnaf
nafion_samples_ind = np.where((port_timestamp_calint01_mcu >= port_timestamp_calint01[idx]) & (port_timestamp_calint01_mcu <= TlastScanNafion))
cal1_Tnaf = np.nanmean(inlet_temp_calint01_mcu[nafion_samples_ind])
#average ph_meter_value for time period TlastScanNafion-1min:TlastScanNafion, call it msinlet_cal1phint
nafion_samples_ind = np.where((port_timestamp_calint01_mcu >= TlastScanNafion - 60) & (port_timestamp_calint01_mcu <= TlastScanNafion))
msinlet_cal1phint = np.absolute(np.nanmean(ph_meter_calint01_mcu[nafion_samples_ind]))
return (cal1_mz15, cal1_mz44, cal1_Tnaf, cal1_Tdir, msinlet_cal1phint,
nafion_mode_timestamp, direct_mode_timestamp)
def Cal2PreProcess(port_timestamp_calint02, L0_dissgas_calint02, gas_mode_calint02,
port_timestamp_calint02_mcu, ph_meter_calint02_mcu,
inlet_temp_calint02_mcu, mass_table, calibration_table):
'''
This subroutine takes in L0 DISSGAS-CALINT02 and produces
intermediary variables cal2-mz44, cal2-mz15, cal2-Tnaf,
cal2-Tdir as well as MSINLET-CA2PHINT AUX data product.
This subroutine groups the scans into two subsets
corresponding to nafion and direct mode, and then extracts
the mass data needed for L1 computations. This subroutine
    is very similar to the SamplePreProcess subroutine, with
    just different intermediary variable assignments.
'''
#replace bad data with nans
inlet_temp_calint02_mcu[inlet_temp_calint02_mcu == -127] = np.nan
inlet_temp_calint02_mcu[inlet_temp_calint02_mcu == 85] = np.nan
#find gas_mode_calint02 == 0 (direct mode)
ind_direct = np.where(gas_mode_calint02 == 0)[0]
Tchange = port_timestamp_calint02_mcu[ind_direct[0]]
Tlast = port_timestamp_calint02_mcu[ind_direct[-1]]
#find gas_mode_calint02 == 1 (nafion mode)
ind_nafion = np.where(gas_mode_calint02 == 1)[0]
TlastScanNafion = port_timestamp_calint02_mcu[ind_nafion[-1]]
#ID timestamp closest to TlastScanNafion - 60
idx = (np.abs(port_timestamp_calint02-(TlastScanNafion - 60))).argmin()
#subset the data collected in nafion mode
nafion_samples_ind = np.where((port_timestamp_calint02 >= port_timestamp_calint02[idx]) & (port_timestamp_calint02 <= TlastScanNafion))
nafion_samples = np.squeeze(np.array(L0_dissgas_calint02[nafion_samples_ind, :]))
#DPS says to exclude the last scan at TlastScanNafion. This subsetted array then gets fed into the ave. routine.
nafion_samples = nafion_samples[:-1, ]
#calculate nafion mode timestamp
nafion_mode_timestamp = np.squeeze(np.array(port_timestamp_calint02[nafion_samples_ind]))
#DPS says to exclude the last scan at TlastScanNafion.
nafion_mode_timestamp = np.around(np.nanmean(nafion_mode_timestamp[:-1, ]))
mass_charge_ratio = 15
window = round(calibration_table[-1, 0], 1)
cal2_mz15 = average_mz(mass_charge_ratio, nafion_samples, mass_table, window)
#average MSINLET-TEMP for nafion time period
nafion_samples_ind = np.squeeze(np.where((port_timestamp_calint02_mcu >= port_timestamp_calint02[idx]) & (port_timestamp_calint02_mcu <= TlastScanNafion)))
cal2_Tnaf = np.nanmean(inlet_temp_calint02_mcu[nafion_samples_ind[:-1]])
#ID timestamp closest to Tlast - 60
idx = (np.abs(port_timestamp_calint02-(Tlast - 60))).argmin()
#subset the data collected in direct mode
direct_samples_ind = np.where((port_timestamp_calint02 >= port_timestamp_calint02[idx]) & (port_timestamp_calint02 <= Tlast))
direct_samples = np.squeeze(np.array(L0_dissgas_calint02[direct_samples_ind, :]))
#calculate direct mode timestamp
direct_mode_timestamp = np.array(port_timestamp_calint02[direct_samples_ind])
direct_mode_timestamp = np.around(np.nanmean(np.squeeze(direct_mode_timestamp)))
mass_charge_ratio = 44
window = round(calibration_table[-1, 24], 1)
cal2_mz44 = average_mz(mass_charge_ratio, direct_samples, mass_table, window)
#average MSINLET-TEMP for direct time period here, call it cal2-Tdir
direct_samples_ind = np.where((port_timestamp_calint02_mcu >= port_timestamp_calint02[idx]) & (port_timestamp_calint02_mcu <= Tlast))
cal2_Tdir = np.nanmean(inlet_temp_calint02_mcu[direct_samples_ind])
#average ph_meter_value for time period Tlast-1min:Tlast, call it msinlet_cal2phint
direct_samples_ind = np.where((port_timestamp_calint02_mcu >= Tlast - 60) & (port_timestamp_calint02_mcu <= Tlast))
msinlet_cal2phint = np.absolute(np.nanmean(ph_meter_calint02_mcu[direct_samples_ind]))
    # associate msinlet_cal2phint with the cal2_mz44 time stamp
return (cal2_mz15, cal2_mz44, cal2_Tnaf, cal2_Tdir, msinlet_cal2phint,
nafion_mode_timestamp, direct_mode_timestamp)
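# --- Editor's illustration (not part of the original DPS code) ---
# The *PreProcess subroutines above all follow the same pattern: locate the last
# scan acquired in a given gas mode, keep only the scans inside a trailing time
# window, optionally drop the final scan, and average. The helper below is a
# minimal, hypothetical sketch of that pattern using plain numpy; the helper name
# and arguments are assumptions, not part of the instrument DPS.
def _example_trailing_window_average(timestamps, values, mode_flags, mode,
                                     window_s=60.0, drop_last=True):
    """Average `values` over the last `window_s` seconds of scans taken in `mode`."""
    mode_ind = np.where(mode_flags == mode)[0]
    t_last = timestamps[mode_ind[-1]]
    # timestamp closest to the start of the trailing window
    idx = (np.abs(timestamps - (t_last - window_s))).argmin()
    sel = np.where((timestamps >= timestamps[idx]) & (timestamps <= t_last))[0]
    if drop_last:
        sel = sel[:-1]
    return np.nanmean(values[sel]), np.around(np.nanmean(timestamps[sel]))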
def GasModeDetermination(sample_valve1, sample_valve2, sample_valve3, sample_valve4):
'''
This subroutine takes in the values of sample_valve1, sample_valve2,
sample_valve3, and sample_valve4 and returns the value for the AUX GASMODE
data product.
'''
data_array_size = np.shape(sample_valve1)
    gasmode_array = np.ones(data_array_size[0])
#!/usr/bin/env python3
#
# Copyright 2015 Signal Processing Devices Sweden AB. All rights reserved.
#
# Description: ADQ14 OCT test script
# Documentation:
#
import numpy as np
import ctypes as ct
import matplotlib.pyplot as plt
import sys
import time
import os
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__))+'/..')
from modules.example_helpers import *
# Record settings
# Setting stream_number_of_records to 2^32-1 means we want infinite number of records
stream_number_of_records = 2**32-1
samples_per_record = 500
OCTCount = 100
# separate parameter for how many we collect in this script before plotting
number_of_records = min([stream_number_of_records, 50])
pretrig = 0; #offset collected samples to before trigger point
holdoff = 0; #offset collected samples to after trigger point
# Plot data if set to True
plot_data = True
# DMA transfer buffer settings
transfer_buffer_size = 1024
num_transfer_buffers = 8
# DMA flush timeout in seconds
flush_timeout = 0.5
# Target buffers in software (Must be 4)
num_sw_buffers = 4;
# Load ADQAPI
ADQAPI = adqapi_load()
# Create ADQControlUnit
adq_cu = ct.c_void_p(ADQAPI.CreateADQControlUnit())
# Enable error logging from ADQAPI
ADQAPI.ADQControlUnit_EnableErrorTrace(adq_cu, 3, '.')
# Find ADQ devices
ADQAPI.ADQControlUnit_FindDevices(adq_cu)
n_of_ADQ = ADQAPI.ADQControlUnit_NofADQ(adq_cu)
print('Number of ADQ found: {}'.format(n_of_ADQ))
# Exit if no devices were found
if n_of_ADQ < 1:
print('No ADQ connected.')
ADQAPI.DeleteADQControlUnit(adq_cu)
adqapi_unload(ADQAPI)
sys.exit(1)
# Select ADQ
if n_of_ADQ > 1:
adq_num = int(input('Select ADQ device 1-{:d}: '.format(n_of_ADQ)))
else:
adq_num = 1
print('Resetting device')
ADQAPI.ADQ_ResetDevice(adq_cu, adq_num, 16)
# Delete ADQ device handle
ADQAPI.ADQControlUnit_DeleteADQ(adq_cu, adq_num)
# Find ADQ devices
ADQAPI.ADQControlUnit_FindDevices(adq_cu)
n_of_ADQ = ADQAPI.ADQControlUnit_NofADQ(adq_cu)
print('Number of ADQ found: {}'.format(n_of_ADQ))
# Exit if no devices were found
if n_of_ADQ < 1:
print('No ADQ connected.')
ADQAPI.DeleteADQControlUnit(adq_cu)
adqapi_unload(ADQAPI)
sys.exit(1)
# Select ADQ
if n_of_ADQ > 1:
adq_num = int(input('Select ADQ device 1-{:d}: '.format(n_of_ADQ)))
else:
adq_num = 1
print_adq_device_revisions(ADQAPI, adq_cu, adq_num)
# Set clock source
ADQ_CLOCK_INT_INTREF = 0
ADQAPI.ADQ_SetClockSource(adq_cu, adq_num, ADQ_CLOCK_INT_INTREF)
# Set OCT parameters
#trig_type = 1 #software trigger
trig_type = 7 #oct trigger
#ADQAPI.ADQ_OCTDebug(adq_cu, adq_num, 0x5e72f039, 1, 0) #use debug data instead of analog inputs
ADQAPI.ADQ_OCTSetTriggerCount(adq_cu, adq_num, OCTCount) #set OCT frame count
ADQAPI.ADQ_SetTriggerMode(adq_cu, adq_num, 1) #first set software trigger to clear OCT counters
ADQAPI.ADQ_SetTriggerMode(adq_cu, adq_num, trig_type) #set triggering mode
# Setup acquisition
# Use 0 in the last two arguments since they are currently unsupported, we use separate calls afterwards to set pretrig and holdoff.
ADQAPI.ADQ_TriggeredStreamingSetup(adq_cu, adq_num,
stream_number_of_records,
samples_per_record,
0, 0);
ADQAPI.ADQ_SetPreTrigSamples(adq_cu, adq_num, pretrig);
ADQAPI.ADQ_SetTriggerHoldOffSamples(adq_cu, adq_num, holdoff);
# Setup streaming mode
ADQAPI.ADQ_SetStreamStatus(adq_cu, adq_num, 2)
# Get number of channels from device
number_of_channels = ADQAPI.ADQ_GetNofChannels(adq_cu, adq_num)
# Setup size of transfer buffers
print('Setting up streaming...')
ADQAPI.ADQ_SetTransferBuffers(adq_cu, adq_num, num_transfer_buffers, transfer_buffer_size)
ADQAPI.ADQ_PDSetupStreaming(adq_cu, adq_num, 0xf)
# Start streaming
print('Collecting data, please wait...')
ADQAPI.ADQ_StopStreaming(adq_cu, adq_num)
ADQAPI.ADQ_StartStreaming(adq_cu, adq_num)
# Allocate target buffers for intermediate data storage
target_buffers = (ct.POINTER(ct.c_int16*transfer_buffer_size)*num_sw_buffers)()
for bufp in target_buffers:
bufp.contents = (ct.c_int16*transfer_buffer_size)()
# Create some buffers for the full records
data_16bit = [np.array([], dtype=np.int16),
np.array([], dtype=np.int16),
np.array([], dtype=np.int16),
              np.array([], dtype=np.int16)]
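# --- Editor's note: the acquisition loop that fills the transfer buffers is not
# included in this excerpt. The sketch below is a hypothetical, numpy-only
# illustration of how one interleaved int16 transfer buffer could be split into
# the per-channel arrays above, assuming the channels are sample-interleaved;
# the function name and the interleaving assumption are not from the original.
def _example_deinterleave(buffer_int16, data_16bit, number_of_channels=4):
    samples = np.frombuffer(buffer_int16, dtype=np.int16)
    for ch in range(number_of_channels):
        data_16bit[ch] = np.append(data_16bit[ch], samples[ch::number_of_channels])
    return data_16bit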
import torch
import argparse
from torch.utils.data import DataLoader
from torch import nn, optim
from torchvision.transforms import transforms
from unet import Unet
from dataset import FundusDataset
import cv2
import time
import copy
from torch.optim import lr_scheduler
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from attunet import AttUNet
from resunet import Resnet34_Unet
from resunets import ResNet, resnet34
from RAUNet import RAUNet, RAUNet34
from torchvision.transforms import Compose, CenterCrop, ToTensor, Resize
# Use CUDA if it is available, otherwise fall back to CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
# The mask only needs to be converted to a tensor
y_transforms = transforms.ToTensor()
# Empty lists to record loss/accuracy/IoU curves for training and validation
Loss_list_train = []
Accuracy_list_train = []
IOU_list_train = []
Loss_list_val = []
Accuracy_list_val = []
IOU_list_val = []
def performance_index_calculation_ACC(img1,img2):
ret1, thresh1 = cv2.threshold(img1 * 255, 127, 255, cv2.THRESH_BINARY)
ret2, thresh2 = cv2.threshold(img2 * 255, 50, 255, cv2.THRESH_BINARY)
erosion1 = cv2.dilate(thresh1, kernel=np.ones((3, 3), np.uint8))
dst1 = cv2.erode(erosion1, kernel=np.ones((3, 3), np.uint8)).astype(np.uint8)
erosion2 = cv2.dilate(thresh2, kernel=np.ones((3, 3), np.uint8))
dst2 = cv2.erode(erosion2, kernel=np.ones((3, 3), np.uint8)).astype(np.uint8)
dstj = cv2.bitwise_and(dst1, dst2)
area1 = areal = 0
contours, hierarchy = cv2.findContours(dst1, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
if len(contours) != 0:
for i in range(len(contours)):
areal += cv2.contourArea(contours[i])
contours1, hierarchy1 = cv2.findContours(dstj, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
if len(contours1) != 0:
for i in range(len(contours1)):
area1 += cv2.contourArea(contours1[i])
    if areal != 0:
        ACC = area1 / areal
    else:
        ACC = 1.0
return ACC
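# --- Editor's illustration (not part of the original training script): a minimal
# usage sketch for performance_index_calculation_ACC. The inputs are assumed to be
# single-channel float masks in [0, 1], matching the thresholds applied above.
def _demo_overlap_accuracy():
    label = np.zeros((64, 64), dtype=np.float32)
    label[16:48, 16:48] = 1.0           # ground-truth square
    pred = np.roll(label, 4, axis=1)    # prediction shifted by 4 pixels
    return performance_index_calculation_ACC(label, pred)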
def performance_index_calculation_IOU(img1,img2):
ret1, thresh1 = cv2.threshold(img1 * 255, 127, 255, cv2.THRESH_BINARY)
ret2, thresh2 = cv2.threshold(img2 * 255, 50, 255, cv2.THRESH_BINARY)
erosion1 = cv2.dilate(thresh1, kernel=np.ones((3, 3), np.uint8))
dst1 = cv2.erode(erosion1, kernel=np.ones((3, 3), np.uint8)).astype(np.uint8)
erosion2 = cv2.dilate(thresh2, kernel=np.ones((3, 3), np.uint8))
    dst2 = cv2.erode(erosion2, kernel=np.ones((3, 3), np.uint8)).astype(np.uint8)
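    # --- Editor's note: the original file is cut off at this point. The lines
    # below are a hedged sketch of the remaining IOU computation, assumed to
    # mirror the ACC routine above: intersection area over union area.
    dstj = cv2.bitwise_and(dst1, dst2)   # intersection mask
    dstu = cv2.bitwise_or(dst1, dst2)    # union mask
    area_inter = area_union = 0
    contours_i, _ = cv2.findContours(dstj, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours_i:
        area_inter += cv2.contourArea(c)
    contours_u, _ = cv2.findContours(dstu, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours_u:
        area_union += cv2.contourArea(c)
    IOU = area_inter / area_union if area_union != 0 else 1.0
    return IOU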
"""
Copyright 2017 <NAME>, <NAME>
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import scipy as sp
import scipy.interpolate
import scipy.ndimage
import itertools
import math
import matplotlib
import matplotlib.pyplot as plt
from . import solver
from . import project_simplex_box
from . import pgd
import llops as yp
import llops.operators as ops
from llops.solvers import iterative, objectivefunctions
from llops import iFt, Ft
from llops.config import default_backend, default_dtype
eps = 1e-13
def dnf(x):
if len(x) == 0:
return 0
else:
# x = x / np.sum(x)
x_fft = np.fft.fft(x)
sigma_x = np.abs(x_fft) ** 2
return np.sqrt(1 / len(x) * np.sum(np.max(sigma_x) / sigma_x))
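# --- Editor's illustration (not part of the original module): the DNF of a single
# pulse is 1, while a randomly coded binary sequence of the same length has a
# larger (but finite) DNF, which is the quantity minimized by vector() below.
def _example_dnf_comparison(length=64, pulses=32):
    delta = np.zeros(length)
    delta[0] = 1.0
    coded = np.zeros(length)
    coded[np.random.choice(length, size=pulses, replace=False)] = 1.0
    return dnf(delta), dnf(coded)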
def cond(x):
if len(x) == 0:
return 0
else:
# x = x / np.sum(x)
x_fft = np.fft.fft(x)
sigma_x = np.abs(x_fft)
return np.max(sigma_x) / np.min(sigma_x)
def vector(pulse_count, kernel_length=None,
method='random_phase', n_tests=100, metric='dnf', dtype=None, backend=None):
"""
    This is a helper function for solving for a blur vector in terms of its condition number or DNF.
"""
# Parse dtype and backend
dtype = dtype if dtype is not None else yp.config.default_dtype
backend = backend if backend is not None else yp.config.default_backend
# Calculate kernel length if not provided
if kernel_length is None:
kernel_length = 2 * pulse_count
# Compute many kernels
kernel_list = []
for _ in range(n_tests):
# Generate blur kernel
if method == 'random_phase':
# Ensure first and last time point are illuminated
indicies = np.random.choice(kernel_length, size=(pulse_count - 2), replace=False)
illum = np.zeros(kernel_length)
illum[indicies] = 1.0
illum[0], illum[-1] = 1.0, 1.0
elif method == 'random':
illum = np.random.uniform(size=kernel_length)
else:
raise ValueError('Invalid kernel generation method %s' % method)
# Append kernel to list
kernel_list.append(illum)
## Choose best kernel
if metric == 'cond':
# Determine kernel with best condition #
metric_best = 1e10
kernel_best = []
for kernel in kernel_list:
kappa = cond(kernel)
if kappa < metric_best:
kernel_best = kernel
metric_best = kappa
elif metric == 'dnf':
# Determine kernel with best dnf
metric_best = 1e10
kernel_best = []
for kernel in kernel_list:
_dnf = dnf(kernel)
if _dnf < metric_best:
kernel_best = kernel
metric_best = _dnf
else:
raise ValueError
# Normalize kernel
kernel_best /= np.sum(kernel_best)
# Cast
kernel_best = yp.cast(kernel_best, dtype, backend)
return (kernel_best, metric_best)
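# --- Editor's usage sketch (hypothetical, illustrative only): draw a coded blur
# vector with 16 pulses in a length-32 window and inspect the DNF that the random
# search above minimized.
def _example_vector_usage():
    blur_vector, best_dnf = vector(pulse_count=16, kernel_length=32,
                                   method='random_phase', n_tests=50, metric='dnf')
    return blur_vector, best_dnf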
def kernel(shape, pulse_count, kernel_length=None, method='random_phase',
n_tests=100, metric='dnf', axis=1, position='center'):
# Generate blur vector
blur_vector, _ = vector(pulse_count,
kernel_length=kernel_length,
method=method,
n_tests=n_tests,
metric=metric)
# Generate kernel from vector
return fromVector(blur_vector, shape=shape, axis=axis, position=position)
def generate(shape, blur_kernel_length, method='random_phase', axis=1,
blur_illumination_fraction=0.5, position='center',normalize=True):
# Generate blur kernel
if method == 'constant':
illum = yp.ones(blur_kernel_length) * blur_illumination_fraction
elif method == 'random_phase' or method == 'coded':
illum, _ = genRandInitialization(blur_kernel_length, blur_illumination_fraction)
elif method == 'random' or method == 'uniform':
illum = np.random.uniform(size=blur_kernel_length)
else:
assert False, "method " + method + " unrecognized"
# Generate kernel
kernel = fromVector(illum, shape, axis, position, normalize=normalize)
# Return kernel
return kernel
def fromVector(blur_vector, shape, axis=1, position='center',
normalize=True, reverse=False, interpolation_factor=1.0):
"""Converts a blur vector to a blur kernel."""
# Get length of kernel
blur_kernel_length = yp.size(blur_vector)
# Find correct dimension
ndims = len(shape)
# Expand illum to 2D and ensure it's in the correct direction
blur_vector = yp.expandDims(blur_vector, ndims)
# Reverse blur vector if requested
if reverse:
blur_vector = yp.flip(blur_vector)
# Ensure blur vector is 1D
blur_vector = yp.vec(blur_vector)
# Apply interpolation
if interpolation_factor != 1.0:
interpolated_length = int(np.round(interpolation_factor * len(blur_vector)))
blur_vector = yp.real(yp.iFt(yp.pad(yp.Ft(blur_vector), interpolated_length, center=True)))
# Ensure blur kernel has the correct dimensions
blur_vector = yp.expandDims(blur_vector, ndims)
# Rotate if necessary
if axis == 1:
blur_vector = blur_vector.T
# Position kernel in image
if position == 'center':
kernel = yp.pad(blur_vector, shape, center=True)
elif position == 'center_left':
roll_amount = [0, 0]
roll_amount[axis] = -blur_kernel_length // 2
kernel = yp.roll(yp.pad(blur_vector, shape, center=True), roll_amount)
elif position == 'center_right':
roll_amount = [0, 0]
roll_amount[axis] = blur_kernel_length // 2
kernel = yp.roll(yp.pad(blur_vector, shape, center=True), roll_amount)
elif position == 'origin':
kernel = yp.pad(blur_vector, shape, crop_start=(0, 0))
else:
raise ValueError('Invalid position %s' % position)
# Center kernel after pad. This is a hack.
roll_values = [1] * yp.ndim(kernel)
kernel = yp.roll(kernel, roll_values)
# Normalize kernel
if normalize:
kernel /= yp.scalar(yp.sum(kernel))
return kernel
######################################################################################################
################################ UTILITIES FOR READING FROM DATA #####################################
######################################################################################################
def blurVectorsFromDataset(dataset, dtype=None, backend=None, debug=False,
use_phase_ramp=False, corrections={}):
"""
This function generates the object size, image size, and blur kernels from
a comptic dataset object.
Args:
dataset: An io.Dataset object
dtype [np.float32]: Which datatype to use for kernel generation (All numpy datatypes supported)
Returns:
object_size: The object size this dataset can recover
image_size: The computed image size of the dataset
blur_kernel_list: A dictionary of blur kernels lists, one key per color channel.
"""
dtype = dtype if dtype is not None else yp.config.default_dtype
backend = backend if backend is not None else yp.config.default_backend
    # Calculate effective pixel size if necessary
if dataset.metadata.system.eff_pixel_size_um is None:
dataset.metadata.system.eff_pixel_size_um = dataset.metadata.camera.pixel_size_um / \
(dataset.metadata.objective.mag * dataset.metadata.system.mag)
# Recover and store position and illumination list
blur_vector_roi_list = []
position_list, illumination_list = [], []
frame_segment_map = []
for frame_index in range(len(dataset.frame_list)):
frame_state = dataset.frame_state_list[frame_index]
# Store which segment this measurement uses
frame_segment_map.append(frame_state['position']['common']['linear_segment_index'])
# Extract list of illumination values for each time point
if 'illumination' in frame_state:
illumination_list_frame = []
for time_point in frame_state['illumination']['states']:
illumination_list_time_point = []
for illumination in time_point:
illumination_list_time_point.append(
{'index': illumination['index'], 'value': illumination['value']})
illumination_list_frame.append(illumination_list_time_point)
else:
raise ValueError('Frame %d does not contain illumination information' % frame_index)
# Extract list of positions for each time point
if 'position' in frame_state:
position_list_frame = []
for time_point in frame_state['position']['states']:
position_list_time_point = []
for position in time_point:
if 'units' in position['value']:
if position['value']['units'] == 'mm':
ps_um = dataset.metadata.system.eff_pixel_size_um
position_list_time_point.append(
[1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
elif position['value']['units'] == 'um':
position_list_time_point.append(
[position['value']['y'] / ps_um, position['value']['x'] / ps_um])
elif position['value']['units'] == 'pixels':
position_list_time_point.append([position['value']['y'], position['value']['x']])
else:
raise ValueError('Invalid units %s for position in frame %d' %
(position['value']['units'], frame_index))
else:
                        # print('WARNING: Could not find position units in metadata, assuming mm')
ps_um = dataset.metadata.system.eff_pixel_size_um
position_list_time_point.append(
[1000 * position['value']['y'] / ps_um, 1000 * position['value']['x'] / ps_um])
position_list_frame.append(position_list_time_point[0]) # Assuming single time point for now.
# Define positions and position indicies used
positions_used, position_indicies_used = [], []
for index, pos in enumerate(position_list_frame):
for color in illumination_list_frame[index][0]['value']:
if any([illumination_list_frame[index][0]['value'][color] > 0 for color in illumination_list_frame[index][0]['value']]):
position_indicies_used.append(index)
positions_used.append(pos)
# Generate ROI for this blur vector
blur_vector_roi = getPositionListBoundingBox(positions_used)
# Append to list
blur_vector_roi_list.append(blur_vector_roi)
# Crop illumination list to values within the support used
illumination_list.append([illumination_list_frame[index] for index in range(min(position_indicies_used), max(position_indicies_used) + 1)])
# Store corresponding positions
position_list.append(positions_used)
# Apply kernel scaling or compression if necessary
if 'scale' in corrections:
for index in range(len(position_list)):
_positions = np.asarray(position_list[index])
for ax in range(yp.shape(_positions)[1]):
_positions[:, ax] = ((_positions[:, ax] - yp.min(_positions[:, ax])) * corrections['scale'] + yp.min(_positions[:, ax]))
position_list[index] = _positions.tolist()
blur_vector_roi_list[index].shape = [corrections['scale'] * sh for sh in blur_vector_roi_list[index].shape]
# Synthesize blur vectors
blur_vector_list = []
for frame_index in range(len(dataset.frame_list)):
# Generate blur vectors
if use_phase_ramp:
kernel_shape = [yp.fft.next_fast_len(max(sh, 1)) for sh in blur_vector_roi_list[frame_index].shape]
offset = yp.cast([sh // 2 + st for (sh, st) in zip(kernel_shape, blur_vector_roi_list[frame_index].start)], 'complex32')
# Create phase ramp and calculate offset
R = ops.PhaseRamp(kernel_shape, dtype='complex32')
# Generate blur vector
blur_vector = yp.zeros(R.M)
for pos, illum in zip(position_list[frame_index], illumination_list[frame_index]):
blur_vector += (R * (yp.cast(pos, 'complex32') - offset))
# Take inverse Fourier Transform
blur_vector = yp.abs(yp.cast(yp.iFt(blur_vector)), 0.0)
else:
blur_vector = yp.asarray([illum[0]['value']['w'] for illum in illumination_list[frame_index]],
dtype=dtype, backend=backend)
        # Normalize illumination vectors
blur_vector /= yp.scalar(yp.sum(blur_vector))
# Append to list
blur_vector_list.append(blur_vector)
# Subtract mininum of frame_segment_map
frame_segment_map = [segment - min(frame_segment_map) for segment in frame_segment_map]
# Return
return blur_vector_list, blur_vector_roi_list, frame_segment_map, position_list, illumination_list
def blurKernelRecoveryFromStatic(blurred, static, solver='iterative', reg=None, iteration_count=10, system_otf=None, threshold=0.2):
static_mean = np.mean(static)
if static_mean > 1e-4:
static = (static.copy() - static_mean) / static_mean
blurred_mean = np.mean(blurred)
if blurred_mean > 1e-4:
blurred = (blurred.copy() - blurred_mean) / blurred_mean
# if system_otf is not None:
# static = iFt(Ft(static) * system_otf)
if solver == 'iterative':
A = ops.Convolution(blurred.shape, static, mode='windowed')
y = blurred.reshape(-1).astype(np.complex64)
# Initialization: choosing a "good" coefficient value will help in convergence
initialization = np.ones(y.shape, y.dtype)
# Define cost function
objective = objectivefunctions.L2(A, y, l2_reg=reg) #, reg=5e-3)
# Gradient descent implementation
kernel_recovered = iterative.GradientDescent(objective).solve(initialization=initialization,
step_size=1e-3,
nesterov_enabled=True,
iteration_count=iteration_count,
display_type='text',
display_iteration_delta=max((iteration_count // 10),1))
else:
if reg is None:
reg = 0
kernel_recovered = iFt((np.conj(Ft(static)) * Ft(blurred)) / (np.abs(Ft(static)) ** 2 + reg))
# Take real part
kernel_recovered = np.real(kernel_recovered).reshape(static.shape)
# Subtract low-frequency information
    kernel_recovered -= sp.ndimage.gaussian_filter(np.real(kernel_recovered.reshape(blurred.shape)), 10)
# Filter by OTF support, threshold
if system_otf is not None:
kernel_recovered = np.real(iFt(Ft(kernel_recovered.reshape(blurred.shape)) * system_otf))
kernel_recovered *= (kernel_recovered > threshold * np.max(kernel_recovered))
return(kernel_recovered)
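# --- Editor's illustration (assumes a known static scene; not part of the
# original module): the closed-form branch above is a regularized inverse filter
# in the Fourier domain, reproduced here in isolation.
def _example_inverse_filter_recovery(static, blurred, reg=1e-3):
    static = (static - np.mean(static)) / (np.mean(static) + eps)
    blurred = (blurred - np.mean(blurred)) / (np.mean(blurred) + eps)
    kernel = iFt((np.conj(Ft(static)) * Ft(blurred)) / (np.abs(Ft(static)) ** 2 + reg))
    return np.real(kernel).reshape(static.shape)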
def registerDatasetImages(dataset, roi=None):
from comptic.registration import registerImage
shift_list = []
image_list = []
for index in range(1, len(dataset.frame_list)):
if roi is not None:
shift_list.append(registerImage(dataset.frame_list[index - 1][roi.slice],
dataset.frame_list[index][roi.slice]))
image_list.append((dataset.frame_list[index - 1][roi.slice], dataset.frame_list[index][roi.slice]))
else:
shift_list.append(registerImage(dataset.frame_list[index - 1], dataset.frame_list[index]))
print(shift_list)
print("Registered image %d of %d, shift was (%d, %d) pixels" %
              (index, len(dataset.frame_list), shift_list[-1][0], shift_list[-1][1]))
return(shift_list, image_list)
def cropAndCenterKernel(kernel_recovered, kernel_size):
# Center maximum value in blur kernel
max_pos = np.unravel_index(np.argmax(kernel_recovered), kernel_recovered.shape)
kernel_centered = np.roll(kernel_recovered, -np.asarray(max_pos) + np.asarray(kernel_recovered.shape) //2)
# Crop to 2x blur kernel fov
kernel_zeroed = np.zeros(kernel_centered.shape, dtype=kernel_centered.dtype)
kernel_zeroed[kernel_centered.shape[0] // 2 - kernel_size[0]:kernel_centered.shape[0] // 2 + kernel_size[0],
kernel_centered.shape[1] // 2 - kernel_size[1]:kernel_centered.shape[1] // 2 + kernel_size[1]] = \
kernel_centered[kernel_centered.shape[0] // 2 - kernel_size[0]:kernel_centered.shape[0] // 2 + kernel_size[0],
kernel_centered.shape[1] // 2 - kernel_size[1]:kernel_centered.shape[1] // 2 + kernel_size[1]]
# Center at middle of blur kernel
p = np.where(kernel_zeroed > 0)
kernel_centered = np.roll(kernel_zeroed, -np.round(np.asarray((np.mean(p[0]), np.mean(p[1]))) + np.asarray(kernel_zeroed.shape) // 2).astype(np.int))
kernel_size_small = kernel_size //2
    # Zero everything outside a reasonable shift range
kernel_zeroed_crop = np.zeros(kernel_centered.shape, dtype=kernel_centered.dtype)
kernel_zeroed_crop[kernel_centered.shape[0] // 2 - kernel_size_small[0]:kernel_centered.shape[0] // 2 + kernel_size_small[0],
kernel_centered.shape[1] // 2 - kernel_size_small[1]:kernel_centered.shape[1] // 2 + kernel_size_small[1]] = \
kernel_centered[kernel_centered.shape[0] // 2 - kernel_size_small[0]:kernel_centered.shape[0] // 2 + kernel_size_small[0],
kernel_centered.shape[1] // 2 - kernel_size_small[1]:kernel_centered.shape[1] // 2 + kernel_size_small[1]]
return(kernel_zeroed_crop)
def plotBlurKernelList(blur_kernel_list, max_count_to_show=5, measurement_list=None, figsize=None):
""" Plots a list of blur kernels and (optionally) corresponding measurements """
count_to_show = min(max_count_to_show, len(blur_kernel_list))
if figsize is None:
plt.figure(figsize=(count_to_show * 2.5, 4 * (1 + int(measurement_list is not None))))
else:
plt.figure(figsize=figsize)
for i in range(count_to_show):
plt.subplot(1 + int(measurement_list is not None), count_to_show, i + 1)
plt.imshow(blur_kernel_list[i], interpolation='bilinear')
plt.title('Blur Kernel ' + str(i))
def illustrateMultiFrameKernel(blur_kernel_list, filename):
""" Function which illustrates a multi-frame blur kernel and saves it to the disk"""
image_c = np.zeros((blur_kernel_list[0].shape[0], blur_kernel_list[0].shape[1], 3))
color_list = ['r', 'g', 'c', 'm', 'w', 'y']
for index, blur_kernel in enumerate(blur_kernel_list):
rgb = matplotlib.colors.to_rgb(color_list[index])
image_c[:, :, 0] += blur_kernel * rgb[0]
image_c[:, :, 1] += blur_kernel * rgb[1]
image_c[:, :, 2] += blur_kernel * rgb[2]
image_c /= np.amax(image_c)
plt.figure()
plt.imshow(image_c, interpolation='bilinear')
plt.xticks([], [])
plt.yticks([], [])
plt.tight_layout()
plt.savefig(filename, transparent=True)
def genSamplingComb(object_size, image_size, dtype=np.complex64):
""" Generates a comb function corresponding with seperation defined by
image_size, centered at the center of object_size """
sampling = np.floor(((np.asarray(object_size) / 2) / np.asarray(image_size)))
sampling_comb = np.zeros(object_size, dtype=dtype)
yy, xx = np.meshgrid(np.arange(-sampling[0], sampling[0] + 1), np.arange(-sampling[1], sampling[1] + 1))
positions_0 = np.hstack((yy.ravel()[:, np.newaxis], xx.ravel()[:, np.newaxis])).astype(np.int)
positions = np.zeros(positions_0.shape, dtype=positions_0.dtype)
positions[:, 0] = object_size[0] // 2 + positions_0[:, 0] * image_size[0]
positions[:, 1] = object_size[1] // 2 + positions_0[:, 1] * image_size[1]
for position in positions:
sampling_comb[position[0], position[1]] = 1
positions -= np.asarray(object_size) // 2
return((sampling_comb, positions))
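# --- Editor's usage sketch (illustrative only): with a 300x300 object and a 64x64
# image field of view this produces a 5x5 grid of delta functions spaced by
# image_size, and `positions` gives their offsets relative to the object center.
def _example_sampling_comb():
    comb, positions = genSamplingComb(object_size=(300, 300), image_size=(64, 64))
    return comb, positions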
def genConvolutionSupportList(blur_kernel_list, image_size, threshold=0.05):
"""
This function generates a list of images defining the support of a windowed convolution operation.
"""
object_size = blur_kernel_list[0].shape
W = ops.Crop(object_size, image_size)
kernel_support_mask = []
object_support_mask = []
print(W.dtype)
window_mask = np.abs(W.H * W * np.ones(W.shape[1], dtype=np.complex64)).reshape(object_size)
for blur_kernel in blur_kernel_list:
C = ops.Convolution((blur_kernel > threshold).astype(np.complex64), mode='windowed',
pad_value=0, pad_size=int(object_size[0] / 2))
kernel_support_mask += [((C * (window_mask.reshape(-1).astype(np.complex64))).reshape(object_size) > threshold)]
object_support_mask.append(kernel_support_mask[-1])
for dim in range(kernel_support_mask[-1].ndim):
object_support_mask[-1] = np.flip(object_support_mask[-1], dim)
return (kernel_support_mask, object_support_mask)
def blurKernelFromPositions(object_size, position_list, illum_list, flip_kernels=False, use_phase_ramp=False,
pos_perturbation=None, dtype=default_dtype, backend=default_backend):
"""
    This function generates a single blur kernel from a list of positions and illuminations (single frame, not multi-frame).
"""
# Initialize blur kernels
blur_kernel = np.zeros(object_size, dtype=np.complex64)
for position_index, position in enumerate(position_list):
y = position[0]
x = position[1]
if pos_perturbation is not None:
y = y + pos_perturbation[position_index, 0]
x = x + pos_perturbation[position_index, 1]
if not use_phase_ramp:
x = int(round(x))
y = int(round(y))
# Assign illumination values
if illum_list[position_index] > 0:
if not use_phase_ramp:
blur_kernel[y, x] += illum_list[position_index]
else:
R = ops.PhaseRamp(blur_kernel.shape, dtype=dtype, backend=backend)
x_ = yp.astype(np.asarray((y - object_size[0] // 2, x - object_size[1] // 2)), R.dtype)
ramp = yp.reshape(R * x_, blur_kernel.shape)
blur_kernel += (ramp * illum_list[position_index])
if use_phase_ramp:
blur_kernel = iFt(blur_kernel)
blur_kernel[blur_kernel < 1e-8] = 0.0
if flip_kernels:
blur_kernel = np.fliplr(blur_kernel)
if np.sum(blur_kernel) > 0:
blur_kernel /= np.sum(blur_kernel)
return blur_kernel
def positionListToBlurKernelMap(kernel_size, position_list, return_fourier=True):
"""Function which converts a list of positions in a blur kernel to a full (non-sparse) blur kernel map.
Args:
kernel_size: Size of first two dimensions in blur_kernel_map
        position_list: List of x,y tuples which are the locations of each position in the blur kernel.
return_fourier: Optional, enables return of blur kernels in frequency (Fourier) domain.
Returns:
A 2D blur_kernel_map, which has dimensions (kernel_size[0], kernel_size[1], size(position_list,1))
"""
# TODO redundant
print("can this be replaced with blurKernelFromPositions?")
n_positions = np.size(position_list, 0)
blur_kernel_map = np.zeros((n_positions, kernel_size[0], kernel_size[1]))
for pos in np.arange(0, n_positions):
blur_kernel_map[pos, position_list[pos, 0], position_list[pos, 1]] = 1
if return_fourier:
blur_kernel_map = Ft(blur_kernel_map.astype(np.complex64))
return(blur_kernel_map)
def pointListToBlurKernel(kernel_size, position_list, illumination_vector):
"""Converts point list and illuminaiton vector to blur kernel"""
# TODO redundant
print("can this be replaced with blurKernelFromPositions?")
position_count = np.size(position_list, 0)
blur_kernel = np.zeros((kernel_size[0], kernel_size[1]))
assert position_count == len(illumination_vector)
for index, position in enumerate(position_list):
blur_kernel[position[0], position[1]] = illumination_vector[index]
return(blur_kernel)
def colorBlurKernelsToMonochrome(blur_kernel_list_color):
"""
This function converts a list of color blur kernels to monochrome, assuming no optical effects.
Args:
blur_kernel_list_color: A dictionary of blur kernel lists, where each key indicates the illumination color channel of that kernel.
Returns:
A list of blur kernels which is the sum of the lists of each key in blur_kernel_list_color
"""
blur_kernel_list = []
for index, blur_kernel in enumerate(blur_kernel_list_color):
first_channel = list(blur_kernel.keys())[0]
new_kernel = np.zeros(blur_kernel[first_channel].shape, dtype=blur_kernel[first_channel].dtype)
for channel in blur_kernel:
new_kernel += blur_kernel[channel]
blur_kernel_list.append(new_kernel)
return(blur_kernel_list)
def getPositionListBoundingBox(kernel_position_list, use_mean=False):
"""
This function returns the bounding box of a single blur kernel or list of blur kernels, defined as a list of positions
Args:
kernel_position_list: list of points (y,x)
Returns:
A list of the extreme values in the blur kernel in the format [y_min, y_max, x_min, x_max]
"""
bounding_box = [1e10, -1e10, 1e10, -1e10]
assert type(kernel_position_list) in [list, np.ndarray]
# Make a single kernel_position_list a list with one element
if type(kernel_position_list[0][0]) not in [list, np.ndarray, tuple]:
kernel_position_list = [kernel_position_list]
for position in kernel_position_list:
if type(position[0][0]) in [np.ndarray, list, tuple]:
# TODO: This will break if we blur by more than one pixel during each pixel motion
if not use_mean:
max_y, max_x = np.max(np.asarray(position), axis=0)[0]
min_y, min_x = np.min(np.asarray(position), axis=0)[0]
else:
mean_y, mean_x = np.mean(np.asarray(position), axis=0)[0]
else:
if not use_mean:
max_y, max_x = np.max(np.asarray(position), axis=0)
min_y, min_x = np.min(np.asarray(position), axis=0)
else:
mean_y, mean_x = np.mean(np.asarray(position), axis=0)
if not use_mean:
bounding_box = [min(min_y, bounding_box[0]),
max(max_y, bounding_box[1]),
min(min_x, bounding_box[2]),
max(max_x, bounding_box[3])]
else:
bounding_box = [min(mean_y, bounding_box[0]),
max(mean_y, bounding_box[1]),
min(mean_x, bounding_box[2]),
max(mean_x, bounding_box[3])]
# Create ROI object
kernel_support_roi = yp.Roi(start=(int(round(bounding_box[0])), int(round(bounding_box[2]))),
end=(int(round(bounding_box[1])), int(round(bounding_box[3]))))
return(kernel_support_roi)
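# --- Editor's usage sketch (illustrative only): the bounding box of a short
# motion trajectory given as (y, x) positions, returned as an llops Roi.
def _example_position_bounding_box():
    trajectory = [(10.0, 12.0), (11.5, 20.0), (14.0, 31.5)]
    return getPositionListBoundingBox(trajectory)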
######################################################################################################
##################################### AUTOCALIBRATION ################################################
######################################################################################################
class BsplineND():
# from http://pythology.blogspot.com/2017/07/nd-b-spline-basis-functions-with-scipy.html
def __init__(self, knots, degree=3, periodic=False):
"""
:param knots: a list of the spline knots with ndim = len(knots)
TODO (sarah) incorporate 2d aspect?
"""
self.ndim = len(knots)
self.splines = []
self.knots = knots
self.degree = degree
for idim, knots1d in enumerate(knots):
nknots1d = len(knots1d)
y_dummy = np.zeros(nknots1d)
knots1d, coeffs, degree = sp.interpolate.splrep(knots1d, y_dummy, k=degree,
per=periodic)
self.splines.append((knots1d, coeffs, degree))
self.ncoeffs = [len(coeffs) for knots, coeffs, degree in self.splines]
def evaluate_independent(self, position):
"""
:param position: a numpy array with size [ndim, npoints]
:returns: a numpy array with size [nspl1, nspl2, ..., nsplN, npts]
with the spline basis evaluated at the input points
"""
ndim, npts = position.shape
values_shape = self.ncoeffs + [npts]
values = np.empty(values_shape)
ranges = [range(icoeffs) for icoeffs in self.ncoeffs]
for icoeffs in itertools.product(*ranges):
values_dim = np.empty((ndim, npts))
for idim, icoeff in enumerate(icoeffs):
coeffs = [1.0 if ispl == icoeff else 0.0 for ispl in
range(self.ncoeffs[idim])]
values_dim[idim] = sp.interpolate.splev(
position[idim],
(self.splines[idim][0], coeffs, self.degree))
values[icoeffs] = np.product(values_dim, axis=0)
return values
def evaluate(self, position):
assert self.weights is not None, "Must specify coefficients with set_coeffs()"
values = self.evaluate_independent(position)
return self.weights.dot(values)
def set_weights(self, weights):
assert len(weights) == self.ncoeffs[0], "must input correct number of weights"
self.weights = weights
def get_basis_splines(extent, num_basis_fn):
    knotsx = np.linspace(0, extent - 1, num_basis_fn)
from summit import Runner
from summit.utils.multiobjective import pareto_efficient, hypervolume
from neptune.sessions import Session, HostedNeptuneBackend
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib as mpl
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import plotly.graph_objects as go
import plotly.express as px
from pandas.plotting import parallel_coordinates
import os
import zipfile
import shutil
import warnings
from textwrap import wrap
import collections.abc
def flatten(d, parent_key="", sep="_"):
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
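# Editor's note (illustrative, not used elsewhere in this module): flatten()
# collapses nested strategy/transform settings into single-level keys.
def _example_flatten():
    nested = {"transform": {"sty": {"tolerance": 0.5}}, "strategy": "TSEMO"}
    return flatten(nested)  # {'transform_sty_tolerance': 0.5, 'strategy': 'TSEMO'}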
COLORS = [
(165, 0, 38),
(215, 48, 39),
(244, 109, 67),
(253, 174, 97),
(254, 224, 144),
(255, 255, 191),
(224, 243, 248),
(171, 217, 233),
(116, 173, 209),
(69, 117, 180),
(49, 54, 149),
]
COLORS = np.array(COLORS) / 256
CMAP = ListedColormap(COLORS)
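# Editor's illustration (not part of the original module): pareto_efficient and
# hypervolume, imported above, are used throughout the class below. On a toy
# two-objective minimization problem they behave as follows; the reference point
# is arbitrary here but must be no better than any observed point.
def _example_hypervolume():
    data = np.array([[1.0, 4.0], [2.0, 2.0], [4.0, 1.0], [3.0, 3.0]])
    front, _ = pareto_efficient(data, maximize=False)
    return hypervolume(front, ref=[5.0, 5.0])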
class PlotExperiments:
"""Make plots from benchmarks tracked on Neptune
Parameters
----------
project : str, optional
The name of the Neptune.ai project
experiment_ids : list of str, optional
A list of experiment ids to pull from Neptune.ai
csv_filename : str, optional
Name of the CSV filename
tag : list of str, optional
A list of tags used as filters
state : str, optional
The state of the experiments. Must be succeeded, failed, running or aborted.
trajectory_length : int, optional
The maximum number of iterations for each experiment. Defaults to 50.
num_repeats : int, optional
        The number of repeats required for each hyperparameter combination.
"""
def __init__(
self,
project: str = None,
experiment_ids: list = None,
csv_filename: str = None,
tag: list = None,
state: list = None,
trajectory_length=50,
num_repeats=20,
):
self.session = Session(backend=HostedNeptuneBackend())
self.proj = self.session.get_project(project)
self.runners = {}
self.experiment_ids = experiment_ids
self.tag = tag
self.state = state
self.trajectory_length = trajectory_length
self.num_repeats = num_repeats
self._restore_runners()
self._create_param_df()
def _restore_runners(self):
"""Restore runners from Neptune Artifacts"""
# Download artifacts
n_experiments = len(self.experiment_ids)
experiments = []
if n_experiments > 100:
for i in range(n_experiments // 100):
experiments += self.proj.get_experiments(
id=self.experiment_ids[i * 100 : (i + 1) * 100],
tag=self.tag,
state=self.state,
)
remainder = n_experiments % 100
experiments += self.proj.get_experiments(
id=self.experiment_ids[(i + 1) * 100 : (i + 1) * 100 + remainder],
tag=self.tag,
state=self.state,
)
else:
experiments = self.proj.get_experiments(
id=self.experiment_ids, tag=self.tag, state=self.state
)
for experiment in experiments:
path = f"data/{experiment.id}"
try:
os.mkdir(
path,
)
except FileExistsError:
pass
experiment.download_artifacts(destination_dir=path)
# Unzip somehow
files = os.listdir(path)
with zipfile.ZipFile(path + "/" + files[0], "r") as zip_ref:
zip_ref.extractall(path)
# Get filename
path += "/output"
files = os.listdir(path)
files_json = [f for f in files if ".json" in f]
if len(files_json) == 0:
warnings.warn(f"{experiment.id} has no file attached.")
continue
# Restore runner
r = Runner.load(path + "/" + files_json[0])
self.runners[experiment.id] = r
# Remove file
shutil.rmtree(f"data/{experiment.id}")
def _create_param_df(self, reference=[-2957, 10.7]):
"""Create a parameters dictionary
Parameters
----------
reference : array-like, optional
            Reference for the hypervolume calculation
"""
records = []
for experiment_id, r in self.runners.items():
record = {}
record["experiment_id"] = experiment_id
# Transform
transform_name = r.strategy.transform.__class__.__name__
transform_params = r.strategy.transform.to_dict()["transform_params"]
record["transform_name"] = transform_name
if transform_name == "Chimera":
hierarchy = transform_params["hierarchy"]
for objective_name, v in hierarchy.items():
key = f"{objective_name}_tolerance"
record[key] = v["tolerance"]
elif transform_name == "MultitoSingleObjective":
record.update(transform_params)
# Strategy
record["strategy_name"] = r.strategy.__class__.__name__
# Batch size
record["batch_size"] = r.batch_size
# Number of initial experiments
try:
record["num_initial_experiments"] = r.n_init
except AttributeError:
pass
# Terminal hypervolume
data = r.experiment.data[["sty", "e_factor"]].to_numpy()
            data[:, 0] *= -1  # make it a minimization problem
y_front, _ = pareto_efficient(
data[: self.trajectory_length, :], maximize=False
)
hv = hypervolume(y_front, ref=reference)
record["terminal_hypervolume"] = hv
# Computation time
time = (
r.experiment.data["computation_t"]
.iloc[0 : self.trajectory_length]
.sum()
)
record["computation_t"] = time
record["noise_level"] = r.experiment.noise_level
records.append(record)
# Make pandas dataframe
self.df = pd.DataFrame.from_records(records)
return self.df
def _create_label(self, unique):
transform_text = unique["transform_name"]
chimera_params = f" (STY tol.={unique['sty_tolerance']}, E-factor tol.={unique['e_factor_tolerance']})"
transform_text += (
chimera_params if unique["transform_name"] == "Chimera" else ""
)
return f"{unique['strategy_name']}, {transform_text}, {unique['num_initial_experiments']} initial experiments"
def best_pareto_grid(self, ncols=3, figsize=(20, 40)):
"""Make a grid of pareto plots
Only includes the run with the maximum terminal hypervolume for each
unique hyperparameter combination.
Parameters
----------
ncols : int, optional
The number of columns in the grid. Defaults to 3
figsize : tuple, optional
The figure size. Defaults to 20 wide x 40 high
"""
# Group experiment repeats
df = self.df.copy()
df = df.set_index("experiment_id")
df = df.drop(columns=["terminal_hypervolume", "computation_t"])
uniques = df.drop_duplicates(keep="last") # This actually groups them
uniques = uniques.sort_values(by=["strategy_name", "transform_name"])
df_new = self.df.copy()
nrows = len(uniques) // ncols
nrows += 1 if len(uniques) % ncols != 0 else 0
fig = plt.figure(figsize=figsize)
fig.subplots_adjust(wspace=0.2, hspace=0.5)
i = 1
# Loop through groups of repeats
for index, unique in uniques.iterrows():
# Find number of matching rows to this unique row
temp_df = df_new.merge(unique.to_frame().transpose(), how="inner")
# Find experiment with maximum hypervolume
max_hv_index = temp_df["terminal_hypervolume"].argmax()
experiment_id = temp_df.iloc[max_hv_index]["experiment_id"]
# Get runner
r = self.runners[experiment_id]
# Create pareto plot
ax = plt.subplot(nrows, ncols, i)
old_data = r.experiment._data.copy()
r.experiment._data = r.experiment.data.iloc[: self.trajectory_length, :]
r.experiment.pareto_plot(ax=ax)
r.experiment._data = old_data
title = self._create_label(unique)
title = "\n".join(wrap(title, 30))
ax.set_title(title)
ax.set_xlabel(r"Space Time Yield / $kg \; m^{-3} h^{-1}$")
ax.set_ylabel("E-factor")
ax.set_xlim(0.0, float(1.2e4))
ax.set_ylim(0.0, 300.0)
ax.ticklabel_format(axis="x", style="scientific")
i += 1
return fig
def plot_hv_trajectories(
self,
reference=[-2957, 10.7],
plot_type="matplotlib",
include_experiment_ids=False,
min_terminal_hv_avg=0,
ax=None,
):
"""Plot the hypervolume trajectories with repeats as 95% confidence interval
Parameters
----------
reference : array-like, optional
Reference for the hypervolume calculation. Defaults to -2957, 10.7
plot_type : str, optional
Plotting backend to use: matplotlib or plotly. Defaults to matplotlib.
include_experiment_ids : bool, optional
Whether to include experiment ids in the plot labels
        min_terminal_hv_avg : float, optional
Minimum terminal average hypervolume cutoff for inclusion in the plot. Defaults to 0.
"""
# Create figure
if plot_type == "matplotlib":
if ax is not None:
fig = None
else:
fig, ax = plt.subplots(1)
elif plot_type == "plotly":
fig = go.Figure()
else:
raise ValueError(
f"{plot_type} is not a valid plot type. Must be matplotlib or plotly."
)
# Group experiment repeats
df = self.df.copy()
df = df.set_index("experiment_id")
df = df.drop(columns=["terminal_hypervolume", "computation_t"])
uniques = df.drop_duplicates(keep="last") # This actually groups them
df_new = self.df.copy()
if plot_type == "plotly":
colors = px.colors.qualitative.Plotly
else:
colors = COLORS
cycle = len(colors)
c_num = 0
self.hv = {}
for index, unique in uniques.iterrows():
# Find number of matching rows to this unique row
temp_df = df_new.merge(unique.to_frame().transpose(), how="inner")
ids = temp_df["experiment_id"].values
# Calculate hypervolume trajectories
ids = ids if len(ids) < self.num_repeats else ids[: self.num_repeats]
hv_trajectories = np.zeros([self.trajectory_length, len(ids)])
for j, experiment_id in enumerate(ids):
r = self.runners[experiment_id]
data = r.experiment.data[["sty", "e_factor"]].to_numpy()
                data[:, 0] *= -1  # make it a minimization problem
for i in range(self.trajectory_length):
y_front, _ = pareto_efficient(data[0 : i + 1, :], maximize=False)
hv_trajectories[i, j] = hypervolume(y_front, ref=reference)
# Mean and standard deviation
            hv_mean_trajectory = np.mean(hv_trajectories, axis=1)
# -*- coding: utf-8 -*-
# run in py3 !!
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1";
import tensorflow as tf
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction=0.5
config.gpu_options.allow_growth = True
tf.Session(config=config)
import numpy as np
from sklearn import preprocessing
import tensorflow as tf
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import pandas as pd
from keras import backend as K
import keras.layers.convolutional as conv
from keras.layers import merge
from keras.wrappers.scikit_learn import KerasRegressor
from keras import utils
from keras.layers.pooling import MaxPooling1D, MaxPooling2D
from keras.layers import pooling
from keras.models import Sequential, Model
from keras.regularizers import l1, l2
from keras import layers
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM
from keras.optimizers import SGD, RMSprop
from keras.layers.normalization import BatchNormalization
from keras import initializers
from keras.callbacks import EarlyStopping
from keras import callbacks
from keras import backend as K
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from keras.models import Model
from keras import initializers, layers
from keras.optimizers import SGD, Adadelta, Adam
from keras.regularizers import l1, l2
from keras import regularizers
import sys
sys.path.append('.')
from hist_figure import his_figures
if len(sys.argv) > 1:
prefix = sys.argv[1]
else:
prefix = time.time()
DATAPATH = '5fold/'
RESULT_PATH = './results/'
feature_num = 25
batch_num = 2
# batch_size = 32
batch_size = 512
SEQ_LENGTH = 20
STATEFUL = False
scaler = None # tmp, for fit_transform
# id,usage,date,com_date,week,month,year
# com_date,date,id,month,usage,week,year
def get_data(path_to_dataset='df_dh.csv', sequence_length=20, stateful=False, issplit=True):
fold_index = 1
###
dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str',
'year': 'str', 'numbers': 'int', 'log': 'float', 'id': 'str', 'usage': 'float'}
parse_dates = ['date']
print(path_to_dataset)
df = pd.read_csv(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates=parse_dates, encoding="utf-8")
# print(path_to_dataset)
print(df.columns)
df = df[df['error'] >= 0]
# df_test = pd.read_csv(DATAPATH+"test"+str(fold_index)+".csv", header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")
def helper(x):
split = list(map(int, x.strip('[').strip(']').split(',')))
d = {}
for counter, value in enumerate(split):
k = str(len(split)) + "-" + str(counter)
d[k] = value
return d
# df_train_temp = df_train['week'].apply(helper).apply(pd.Series)
df_week = df['week'].apply(helper).apply(pd.Series).as_matrix() # 7
df_month = df['month'].apply(helper).apply(pd.Series).as_matrix() # 12
df_year = df['year'].apply(helper).apply(pd.Series).as_matrix() # 3
df_empty = df[['super', 'com_date', 'error', 'numbers']].copy()
# print(df_empty)
df_super = df_empty.ix[:, [0]]
df_com_date = df_empty.ix[:, [1]]
df_error = df_empty.ix[:, [2]]
df_numbers = df_empty.ix[:, [3]]
X_train_ = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
Y_train_ = df_error.as_matrix()
ss_x = preprocessing.MaxAbsScaler()
ss_y = preprocessing.MaxAbsScaler()
global scaler
scaler = ss_y
# ss_x = preprocessing.StandardScaler()
array_new = ss_x.fit_transform(df_empty.ix[:, [0]])
df_super = pd.DataFrame(array_new)
array_new = ss_x.fit_transform(df_empty.ix[:, [1]])
df_com_date = pd.DataFrame(array_new)
array_new = ss_x.fit_transform(df_empty.ix[:, [3]])
df_numbers = pd.DataFrame(array_new)
array_new = ss_y.fit_transform(df_empty.ix[:, [2]])
df_error = pd.DataFrame(array_new)
df_week = ss_x.fit_transform(df_week)
df_week = pd.DataFrame(df_week)
df_month = ss_x.fit_transform(df_month)
df_month = pd.DataFrame(df_month)
X_train = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
Y_train = df_error.as_matrix()
print('Xshape:' + str(X_train.shape))
print('Yshape:' + str(Y_train.shape))
y_arr = Y_train.T.tolist()
# print(y_arr)
try:
y_arr = ss_y.inverse_transform(y_arr)
#draw_error_line(y_arr[0], df)
#draw_error_bar(y_arr[0])
except Exception as e:
print(e)
if not issplit:
print('Xshape:' + str(X_train.shape))
print('Yshape:' + str(Y_train.shape))
X_train, X_test, Y_train, Y_test = train_test_split(X_train_, Y_train_, test_size=0.1, shuffle=False)
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
return X_train, Y_train, X_test, Y_test, X_val, Y_val
else:
return split_CV(X_train, Y_train, sequence_length=sequence_length, stateful=False)
import datetime
def get_data_single_user(path_to_dataset='df_dh.csv', sequence_length=20, stateful=False, issplit=True):
fold_index = 1
###
dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str',
'year': 'str', 'numbers': 'int', 'log': 'float', 'id': 'str', 'usage': 'float'}
parse_dates = ['date']
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$' + path_to_dataset)
df = pd.read_csv(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates=parse_dates, encoding="utf-8")
# print(path_to_dataset)
print(df.columns)
df = df[df['usage'] >= 0]
# df_test = pd.read_csv(DATAPATH+"test"+str(fold_index)+".csv", header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")
def helper(x):
split = list(map(int, x.strip('[').strip(']').split(',')))
d = {}
for counter, value in enumerate(split):
k = str(len(split)) + "-" + str(counter)
d[k] = value
return d
# df_train_temp = df_train['week'].apply(helper).apply(pd.Series)
df_week = df['week'].apply(helper).apply(pd.Series).as_matrix() # 7
df_month = df['month'].apply(helper).apply(pd.Series).as_matrix() # 12
df_year = df['year'].apply(helper).apply(pd.Series).as_matrix() # 3
df_empty = df[['com_date', 'usage']].copy()
# print(df_empty)
df_com_date = df_empty.ix[:, [0]]
df_usage = df_empty.ix[:, [1]]
ss_x = preprocessing.MaxAbsScaler()
ss_y = preprocessing.MaxAbsScaler()
global scaler
scaler = ss_y
# ss_x = preprocessing.StandardScaler()
array_new = ss_x.fit_transform(df_empty.ix[:, [0]])
df_com_date = pd.DataFrame(array_new)
array_new = ss_y.fit_transform(df_empty.ix[:, [1]])
df_usage = pd.DataFrame(array_new)
df_week = ss_x.fit_transform(df_week)
df_week = pd.DataFrame(df_week)
df_month = ss_x.fit_transform(df_month)
df_month = pd.DataFrame(df_month)
X_train = np.column_stack((df_week, df_month))
    Y_train = df_usage.to_numpy()
print(X_train)
print(Y_train.shape)
y_arr = Y_train.T.tolist()
# print(y_arr)
print(df)
y_arr = ss_y.inverse_transform(y_arr)
draw_error_line(y_arr[0], df)
draw_error_bar(y_arr[0])
# try:
#
# except Exception as e:
# print(e)
if not issplit:
return X_train, Y_train
else:
return split_CV(X_train, Y_train, sequence_length=sequence_length, stateful=False)
def inverse_xy_transform(scaler, *para):
temp = []
for i in para:
print(i.reshape(-1, 1))
temp.append(scaler.inverse_transform(i.reshape(-1, 1)))
return temp
def split_CV(X_train, Y_train, sequence_length=20, stateful=False):
"""return ndarray
"""
print(X_train)
print(Y_train.shape[0])
result_x = []
result_y = []
for index in range(len(Y_train) - sequence_length):
result_x.append(X_train[index: index + sequence_length])
# result_y.append(Y_train[index: index + sequence_length])
result_y.append(Y_train[index + sequence_length])
X_train = np.array(result_x)
Y_train = np.array(result_y)
print(X_train.shape) # (705, 20, 24)
print(Y_train.shape) # (705, 1)
print('##################################################################')
if stateful == True:
# X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size=0.1,shuffle=False)
cp_X_train = X_train.copy()
cp_Y_train = Y_train.copy()
X_train = cp_X_train[:640, ...]
X_test = cp_X_train[640:, ...]
Y_train = cp_Y_train[:640, ...]
Y_test = cp_Y_train[640:, ...]
print(X_test.shape[0]) #
print(Y_test.shape[0]) #
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
print('##################################################################')
if stateful == False:
X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
# print(X_train.shape)#(705, 20, 24)
# print(Y_train.shape)#(705, 1)
# train_x_disorder = X_train.reshape((X_train.shape[0],X_train.shape[1] , feature_num))
# test_x_disorder = X_test.reshape((X_test.shape[0],X_test.shape[1], feature_num ))
# X_val = X_val.reshape((X_val.shape[0], X_val.shape[1] , feature_num))
# print(train_x_disorder.dtype)
train_y_disorder = Y_train.reshape(-1, 1)
test_y_disorder = Y_test.reshape(-1, 1)
Y_val = Y_val.reshape(-1, 1)
print(X_train.shape[0]) # (705, 20, 24)
print(Y_train.shape[0]) # (705, 1)
print('@' * 40)
# print(X_test)
print(train_y_disorder.shape)
print('@' * 40)
return [X_train, train_y_disorder, X_test, test_y_disorder, X_val, Y_val] # ndarray
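# Illustrative sketch (not part of the original pipeline): a quick sanity check of the
# sliding-window split above. The array sizes are arbitrary assumptions; split_CV only
# requires len(Y_train) > sequence_length.
def _demo_split_CV():
    toy_X = np.random.rand(100, 24)  # 100 time steps, 24 features
    toy_Y = np.random.rand(100, 1)   # one target per time step
    x_tr, y_tr, x_te, y_te, x_va, y_va = split_CV(toy_X, toy_Y, sequence_length=20, stateful=False)
    # each X sample is a (20, 24) window; each label is the value right after its window
    print(x_tr.shape, y_tr.shape, x_te.shape, y_te.shape, x_va.shape, y_va.shape)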
def LSTM2(X_train):
model = Sequential()
# layers = [1, 50, 100, 1]
layers = [1, 30, 30, 1]
if STATEFUL == False:
model.add(LSTM(
layers[1],
input_shape=(X_train.shape[1], X_train.shape[2]),
stateful=STATEFUL,
return_sequences=True,
kernel_initializer='he_normal'
# , kernel_regularizer=l2(0.01)
))
else:
model.add(LSTM(
layers[1],
# input_shape=(X_train.shape[1], X_train.shape[2]),
batch_input_shape=(batch_size, X_train.shape[1], X_train.shape[2]),
stateful=STATEFUL,
return_sequences=True,
kernel_initializer='he_normal'
# , kernel_regularizer=l2(0.01)
))
# model.add(Dropout(0.2))
model.add(LSTM(
layers[2],
stateful=STATEFUL,
return_sequences=False,
kernel_initializer='he_normal'
# ,kernel_regularizer=l2(0.01)
))
model.add(Dropout(0.2))
# model.add(Flatten())
model.add(Dense(
layers[3]
, kernel_initializer='he_normal'
, kernel_regularizer=l2(0.01)
, activity_regularizer=l1(0.01)
))
model.add(BatchNormalization())
model.add(Activation("linear"))
start = time.time()
sgd = SGD(lr=1e-3, decay=1e-8, momentum=0.9, nesterov=True)
ada = Adadelta(lr=1e-4, rho=0.95, epsilon=1e-6)
rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-6, decay=1e-8)
adam = Adam(lr=1e-3)
# model.compile(loss="mse", optimizer=sgd)
# try:
# model.load_weights("./lstm.h5")
# except Exception as ke:
# print(str(ke))
model.compile(loss="mse", optimizer=adam)
print("Compilation Time : ", time.time() - start)
return model
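# Minimal training sketch (an assumption, not part of the original script): X_*/Y_* are the
# windowed arrays returned by split_CV above; `batch_size` and `STATEFUL` are the globals
# already referenced elsewhere in this file.
def _demo_train_lstm(X_train, Y_train, X_val, Y_val, epochs=10):
    model = LSTM2(X_train)
    history = model.fit(X_train, Y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(X_val, Y_val),
                        shuffle=not STATEFUL)
    return model, history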
def draw_error_bar(y_array):
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
x = list(range(len(y_array)))
plt.bar(x, y_array, label='error')
# plt.legend(handles=[line1, line2,line3])
plt.legend()
plt.title('error bar')
# plt.show()
axes.grid()
fig.tight_layout()
fig.savefig(RESULT_PATH + str(batch_size) + 'bar_error.png', dpi=300)
def draw_error_line(y_array, df):
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
x = list(range(len(y_array)))
plt.plot(x, y_array, label='error')
x = list(range(len(df['error'])))
plt.plot(x, df['error'], label='error')
# plt.legend(handles=[line1, line2,line3])
plt.legend()
plt.title('error plot')
# plt.show()
axes.grid()
fig.tight_layout()
fig.savefig(RESULT_PATH + str(batch_size) + 'line_error.png', dpi=300)
def draw_scatter(predicted, y_test, X_test, x_train, y_train, data_file):
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
x = list(range(len(predicted)))
total_width, n = 0.8, 2
width = total_width / n
plt.bar(x, y_test.T[0], width=width, label='truth', fc='y')
for i in range(len(x)):
x[i] = x[i] + width
plt.bar(x, predicted, width=width, label='predict', fc='r')
# plt.legend(handles=[line1, line2,line3])
plt.legend()
plt.title('lstm')
# plt.show()
axes.grid()
fig.tight_layout()
fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + 'bar_lstm.png', dpi=300)
fig = plt.figure()
plt.scatter(y_test.T[0], predicted)
# plt.plot(y_test.T[0], predicted, linewidth =0.3, color='red')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel('truth')
plt.ylabel('predict')
# plt.show()
fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + '_scatter_lstm.png',
dpi=300)
def draw_line(predicted, y_test, X_test, x_train, y_train, data_file):
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
x = list(range(len(predicted)))
total_width, n = 0.8, 2
width = total_width / n
plt.bar(x, y_test.T[0], width=width, label='True', fc='y')
for i in range(len(x)):
x[i] = x[i] + width
plt.bar(x, predicted, width=width, label='Predicted', fc='r')
# plt.legend(handles=[line1, line2,line3])
plt.legend()
plt.title('lstm')
# plt.show()
axes.grid()
axes = fig.add_subplot(1, 1, 1)
fig.tight_layout()
fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + 'bar_lstm.png', dpi=300)
fig = plt.figure()
plt.scatter(y_test.T[0], predicted)
# plt.plot(y_test.T[0], predicted, linewidth =0.3, color='red')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel('True')
plt.ylabel('Predicted')
# plt.show()
fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + '_scatter_lstm.png',
dpi=300)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
plt.plot(x, y_test.T[0], label='True')
for i in range(len(x)):
x[i] = x[i] + width
plt.plot(x, predicted, label='Predicted')
plt.legend()
axes.grid()
fig.tight_layout()
fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + 'line_lstm.png', dpi=300)
def stat_metrics(X_test, y_test, predicted):
predicted = np.reshape(predicted, y_test.shape[0])
train_error = np.abs(y_test - predicted)
mean_error = np.mean(train_error)
min_error = np.min(train_error)
max_error = np.max(train_error)
std_error = np.std(train_error)
print(predicted)
print(y_test.T[0])
    print(np.mean(X_test))
import json
import os
import random
import cv2
import numpy as np
import torch
import torch.utils.data
from tqdm import tqdm
def collate_function_seq(batch):
dem_frames = torch.stack([item[0] for item in batch])
dem_actions = torch.stack([item[1] for item in batch])
dem_lens = [item[2] for item in batch]
query_frames = torch.stack([item[3] for item in batch])
target_actions = torch.stack([item[4] for item in batch])
return [dem_frames, dem_actions, dem_lens, query_frames, target_actions]
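# Hypothetical wiring of collate_function_seq into a DataLoader. It assumes a dataset whose
# __getitem__ returns (dem_frames, dem_actions, dem_lens, query_frames, target_actions),
# e.g. the TransitionDatasetSequence class defined further below.
def _demo_seq_loader(seq_dataset, batch_size=4):
    loader = torch.utils.data.DataLoader(seq_dataset, batch_size=batch_size, shuffle=True,
                                         collate_fn=collate_function_seq)
    dem_frames, dem_actions, dem_lens, query_frames, target_actions = next(iter(loader))
    # dem_lens stays a plain Python list of per-sample demonstration lengths
    return dem_frames.shape, dem_lens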
def index_data(json_list, path_list):
print(f'processing files {len(json_list)}')
data_tuples = []
for j, v in tqdm(zip(json_list, path_list)):
with open(j, 'r') as f:
state = json.load(f)
ep_lens = [len(x) for x in state]
past_len = 0
for e, l in enumerate(ep_lens):
data_tuples.append([])
# skip first 30 frames and last 83 frames
for f in range(30, l - 83):
# find action taken;
f0x, f0y = state[e][f]['agent'][0]
f1x, f1y = state[e][f + 1]['agent'][0]
dx = (f1x - f0x) / 2.
dy = (f1y - f0y) / 2.
action = [dx, dy]
# action = ACTION_LIST.index([dx, dy])
data_tuples[-1].append((v, past_len + f, action))
assert len(data_tuples[-1]) > 0
past_len += l
return data_tuples
class TransitionDataset(torch.utils.data.Dataset):
"""
Training dataset class for the behavior cloning mlp model.
Args:
path: path to the dataset
types: list of video types to include
size: size of the frames to be returned
mode: train, val
num_context: number of context state-action pairs
num_test: number of test state-action pairs
num_trials: number of trials in an episode
        action_range: number of frames to skip; actions are combined over this number of frames (displacement) of the agent
        process_data: whether to process the videos or not (skip if already processed)
__getitem__:
returns: (dem_frames, dem_actions, query_frames, target_actions)
dem_frames: (num_context, 3, size, size)
dem_actions: (num_context, 2)
query_frames: (num_test, 3, size, size)
target_actions: (num_test, 2)
"""
def __init__(self, path, types=None, size=None, mode="train", num_context=30, num_test=1, num_trials=9,
action_range=10, process_data=0):
self.path = path
self.types = types
self.size = size
self.mode = mode
self.num_trials = num_trials
self.num_context = num_context
self.num_test = num_test
self.action_range = action_range
self.ep_combs = self.num_trials * (self.num_trials - 2) # 9p2 - 9
self.eps = [[x, y] for x in range(self.num_trials) for y in range(self.num_trials) if x != y]
types_str = '_'.join(self.types)
self.path_list = []
self.json_list = []
# get video paths and json file paths
for t in types:
print(f'reading files of type {t} in {mode}')
paths = [os.path.join(self.path, t, x) for x in os.listdir(os.path.join(self.path, t)) if
x.endswith(f'.mp4')]
jsons = [os.path.join(self.path, t, x) for x in os.listdir(os.path.join(self.path, t)) if
x.endswith(f'.json') and 'index' not in x]
paths = sorted(paths)
jsons = sorted(jsons)
if mode == 'train':
self.path_list += paths[:int(0.8 * len(jsons))]
self.json_list += jsons[:int(0.8 * len(jsons))]
elif mode == 'val':
self.path_list += paths[int(0.8 * len(jsons)):]
self.json_list += jsons[int(0.8 * len(jsons)):]
else:
self.path_list += paths
self.json_list += jsons
self.data_tuples = []
if process_data:
# index the videos in the dataset directory. This is done to speed up the retrieval of videos.
# frame index, action tuples are stored
self.data_tuples = index_data(self.json_list, self.path_list)
# tuples of frame index and action (displacement of agent)
index_dict = {'data_tuples': self.data_tuples}
with open(os.path.join(self.path, f'index_bib_{mode}_{types_str}.json'), 'w') as fp:
json.dump(index_dict, fp)
else:
# read pre-indexed data
with open(os.path.join(self.path, f'index_bib_{mode}_{types_str}.json'), 'r') as fp:
index_dict = json.load(fp)
self.data_tuples = index_dict['data_tuples']
self.tot_trials = len(self.path_list) * 9
def _get_frame(self, video, frame_idx):
cap = cv2.VideoCapture(video)
# read frame at id and resize
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
_, frame = cap.read()
if self.size is not None:
assert frame is not None, f'frame is empty {frame_idx}, {video}'
frame = cv2.resize(frame, self.size)
frame = torch.tensor(frame).permute(2, 0, 1)
# return frames as a torch tensor f x c x w x h
frame = frame.to(torch.float32) / 255.
cap.release()
return frame
def get_trial(self, trials, num_transitions, step=1):
# retrieve state embeddings and actions from cached file
states = []
actions = []
trial_len = []
for t in trials:
trial_len += [(t, n) for n in range(0, len(self.data_tuples[t]), step)]
random.shuffle(trial_len)
if len(trial_len) < num_transitions:
return None, None, False
for t, n in trial_len[:num_transitions]:
video = self.data_tuples[t][n][0]
states.append(self._get_frame(video, self.data_tuples[t][n][1]))
# actions are pooled over frames
if len(self.data_tuples[t]) > n + self.action_range:
actions_xy = [d[2] for d in self.data_tuples[t][n:n + self.action_range]]
else:
actions_xy = [d[2] for d in self.data_tuples[t][n:]]
actions_xy = np.array(actions_xy)
actions_xy = np.mean(actions_xy, axis=0)
actions.append(actions_xy)
states = torch.stack(states, dim=0)
actions = torch.tensor(np.array(actions))
return states, actions, True
def __getitem__(self, idx):
# retrieve expert trajectories
dem = False
test = False
# dem and test check for valid trajectory samples
while not dem or not test:
ep_trials = [idx * self.num_trials + t for t in range(self.num_trials)]
random.shuffle(ep_trials)
dem_states, dem_actions, dem = self.get_trial(ep_trials[:-1], self.num_context)
test_states, test_actions, test = self.get_trial([ep_trials[-1]], self.num_test,
step=self.action_range)
return dem_states, dem_actions, test_states, test_actions
def __len__(self):
return self.tot_trials // self.num_trials
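# Hypothetical usage sketch for TransitionDataset; the dataset root and video type below are
# placeholders. Default collation is enough here because __getitem__ returns fixed-size tensors.
def _demo_transition_loader():
    dataset = TransitionDataset(path='/path/to/bib', types=['single_object'], size=(84, 84),
                                mode='train', process_data=0)
    loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2)
    dem_states, dem_actions, test_states, test_actions = next(iter(loader))
    # dem_states: (8, num_context, 3, 84, 84); dem_actions: (8, num_context, 2)
    return dem_states.shape, dem_actions.shape, test_states.shape, test_actions.shape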
class TestTransitionDataset(torch.utils.data.Dataset):
"""
Test dataset class for the behavior cloning mlp model. This dataset is used to test the model on the eval data.
This class is used to compare plausible and implausible episodes.
Args:
path: path to the dataset
types: video type to evaluate on
size: size of the frames to be returned
mode: test
num_context: number of context state-action pairs
num_test: number of test state-action pairs
num_trials: number of trials in an episode
        action_range: number of frames to skip; actions are combined over this number of frames (displacement) of the agent
        process_data: whether to process the videos or not (skip if already processed)
__getitem__:
returns: (expected_dem_frames, expected_dem_actions, expected_query_frames, expected_target_actions,
unexpected_dem_frames, unexpected_dem_actions, unexpected_query_frames, unexpected_target_actions)
dem_frames: (num_context, 3, size, size)
dem_actions: (num_context, 2)
query_frames: (num_test, 3, size, size)
target_actions: (num_test, 2)
"""
def __init__(self, path, task_type=None, size=None, mode="test", num_context=30, num_test=1, num_trials=9,
action_range=10, process_data=0):
self.path = path
self.task_type = task_type
self.size = size
self.mode = mode
self.num_trials = num_trials
self.num_context = num_context
self.num_test = num_test
self.action_range = action_range
self.ep_combs = self.num_trials * (self.num_trials - 2) # 9p2 - 9
self.eps = [[x, y] for x in range(self.num_trials) for y in range(self.num_trials) if x != y]
self.path_list_exp = []
self.json_list_exp = []
self.path_list_un = []
self.json_list_un = []
print(f'reading files of type {task_type} in {mode}')
paths_expected = sorted([os.path.join(self.path, task_type, x) for x in os.listdir(os.path.join(self.path, task_type)) if
x.endswith(f'e.mp4')])
jsons_expected = sorted([os.path.join(self.path, task_type, x) for x in os.listdir(os.path.join(self.path, task_type)) if
x.endswith(f'e.json') and 'index' not in x])
paths_unexpected = sorted([os.path.join(self.path, task_type, x) for x in os.listdir(os.path.join(self.path, task_type)) if
x.endswith(f'u.mp4')])
jsons_unexpected = sorted([os.path.join(self.path, task_type, x) for x in os.listdir(os.path.join(self.path, task_type)) if
x.endswith(f'u.json') and 'index' not in x])
self.path_list_exp += paths_expected
self.json_list_exp += jsons_expected
self.path_list_un += paths_unexpected
self.json_list_un += jsons_unexpected
self.data_unexpected = []
self.data_expected = []
if process_data:
            # index data. This is done to speed up video retrieval.
# frame index, action tuples are stored
self.data_expected = self.index_data(self.json_list_exp, self.path_list_exp)
index_dict = {'data_tuples': self.data_expected}
with open(os.path.join(self.path, f'index_bib_test_{task_type}e.json'), 'w') as fp:
json.dump(index_dict, fp)
self.data_unexpected = self.index_data(self.json_list_un, self.path_list_un)
index_dict = {'data_tuples': self.data_unexpected}
with open(os.path.join(self.path, f'index_bib_test_{task_type}u.json'), 'w') as fp:
json.dump(index_dict, fp)
else:
# load pre-indexed data
with open(os.path.join(self.path, f'index_bib_test_{task_type}e.json'), 'r') as fp:
index_dict = json.load(fp)
self.data_expected = index_dict['data_tuples']
with open(os.path.join(self.path, f'index_bib_test_{task_type}u.json'), 'r') as fp:
index_dict = json.load(fp)
self.data_unexpected = index_dict['data_tuples']
def _get_frame(self, video, frame_idx):
cap = cv2.VideoCapture(video)
# read frame at id and resize
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
_, frame = cap.read()
if self.size is not None:
assert frame is not None, f'frame is empty {frame_idx}, {video}'
frame = cv2.resize(frame, self.size)
frame = torch.tensor(frame).permute(2, 0, 1)
# return frames as a torch tensor f x c x w x h
frame = frame.to(torch.float32) / 255.
cap.release()
return frame
def get_trial(self, trials, data, step=1, shuffle=False):
# retrieve state embeddings and actions from cached file
states = []
actions = []
trial_len = []
for t in trials:
trial_len += [(t, n) for n in range(0, len(data[t]), step)]
if shuffle:
random.shuffle(trial_len)
trial_len = trial_len[:100]
for t, n in trial_len:
video = data[t][n][0]
states.append(self._get_frame(video, data[t][n][1]))
actions_xy = [d[2] for d in data[t][n:n + self.action_range]]
actions_xy = np.array(actions_xy)
actions_xy = np.mean(actions_xy, axis=0)
actions.append(actions_xy)
states = torch.stack(states, dim=0)
actions = torch.tensor(np.array(actions))
return states, actions
def __getitem__(self, idx):
# retrieve expert trajectories
ep_trials = [idx * self.num_trials + t for t in range(self.num_trials)]
# retrieve complete fam trajectories
fam_expected_states, fam_expected_actions = self.get_trial(ep_trials[:-1], self.data_expected, shuffle=True)
fam_unexpected_states, fam_unexpected_actions = self.get_trial(ep_trials[:-1], self.data_unexpected,
shuffle=True)
# retrieve complete test trajectories
test_expected_states, test_expected_actions = self.get_trial([ep_trials[-1]], self.data_expected,
step=self.action_range)
test_unexpected_states, test_unexpected_actions = self.get_trial([ep_trials[-1]], self.data_unexpected,
step=self.action_range)
return fam_expected_states, fam_expected_actions, test_expected_states, test_expected_actions, \
fam_unexpected_states, fam_unexpected_actions, test_unexpected_states, test_unexpected_actions
def __len__(self):
return len(self.data_expected) // 9
class TransitionDatasetSequence(torch.utils.data.Dataset):
"""
Training dataset class for the behavior cloning mlp model.
Args:
path: path to the dataset
types: list of video types to include
size: size of the frames to be returned
mode: train, val
num_context: number of context state-action pairs
num_test: number of test state-action pairs
num_trials: number of trials in an episode
        action_range: number of frames to skip; actions are combined over this number of frames (displacement) of the agent
        process_data: whether to process the videos or not (skip if already processed)
max_len: maximum length of the sequence
__getitem__:
returns: (dem_frames, dem_actions, dem_lens, query_frames, target_actions)
dem_frames: (num_context, 3, size, size)
dem_actions: (num_context, max_len, 2)
dem_lens: (num_context)
query_frames: (num_test, 3, size, size)
target_actions: (num_test, 2)
"""
def __init__(self, path, types=None, size=None, mode="train", num_context=30, num_test=1, num_trials=9,
action_range=10, process_data=0, max_len=30):
self.path = path
self.types = types
self.size = size
self.mode = mode
self.num_trials = num_trials
self.num_context = num_context
self.num_test = num_test
self.action_range = action_range
self.max_len = max_len
self.ep_combs = self.num_trials * (self.num_trials - 2) # 9p2 - 9
self.eps = [[x, y] for x in range(self.num_trials) for y in range(self.num_trials) if x != y]
types_str = '_'.join(self.types)
self.path_list = []
self.json_list = []
for t in types:
print(f'reading files of type {t} in {mode}')
paths = [os.path.join(self.path, t, x) for x in os.listdir(os.path.join(self.path, t)) if
x.endswith(f'.mp4')]
jsons = [os.path.join(self.path, t, x) for x in os.listdir(os.path.join(self.path, t)) if
x.endswith(f'.json') and 'index' not in x]
paths = sorted(paths)
jsons = sorted(jsons)
if mode == 'train':
self.path_list += paths[:int(0.8 * len(jsons))]
self.json_list += jsons[:int(0.8 * len(jsons))]
elif mode == 'val':
self.path_list += paths[int(0.8 * len(jsons)):]
self.json_list += jsons[int(0.8 * len(jsons)):]
else:
self.path_list += paths
self.json_list += jsons
self.data_tuples = []
if process_data:
# index the videos for quicker video retrieval.
self.data_tuples = index_data(self.json_list, self.path_list)
index_dict = {'data_tuples': self.data_tuples}
with open(os.path.join(self.path, f'index_bib_{mode}_{types_str}.json'), 'w') as fp:
json.dump(index_dict, fp)
else:
# load pre-existing index
with open(os.path.join(self.path, f'index_bib_{mode}_{types_str}.json'), 'r') as fp:
index_dict = json.load(fp)
self.data_tuples = index_dict['data_tuples']
self.tot_trials = len(self.path_list) * 9
def _get_frame(self, video, frame_idx):
cap = cv2.VideoCapture(video)
# read frame at id and resize
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
_, frame = cap.read()
if self.size is not None:
assert frame is not None, f'frame is empty {frame_idx}, {video}'
frame = cv2.resize(frame, self.size)
frame = torch.tensor(frame).permute(2, 0, 1)
# return frames as a torch tensor f x c x w x h
frame = frame.to(torch.float32) / 255.
cap.release()
return frame
def get_trial(self, trials, step=10):
# retrieve state embeddings and actions from cached file
states = []
actions = []
trial_len = []
lens = []
for t in trials:
tl = [(t, n) for n in range(0, len(self.data_tuples[t]), step)]
if len(tl) > self.max_len:
tl = tl[:self.max_len]
trial_len.append(tl)
for tl in trial_len:
states.append([])
actions.append([])
lens.append(len(tl))
for t, n in tl:
video = self.data_tuples[t][n][0]
states[-1].append(self._get_frame(video, self.data_tuples[t][n][1]))
if len(self.data_tuples[t]) > n + self.action_range:
actions_xy = [d[2] for d in self.data_tuples[t][n:n + self.action_range]]
else:
actions_xy = [d[2] for d in self.data_tuples[t][n:]]
actions_xy = np.array(actions_xy)
                actions_xy = np.mean(actions_xy, axis=0)
import pytest
import numpy as np
from lumicks.pylake.detail.image import reconstruct_image, reconstruct_num_frames, save_tiff, ImageMetadata, line_timestamps_image
def test_metadata_from_json():
json = { 'cereal_class_version': 1,
'fluorescence': True,
'force': False,
'scan count': 0,
'scan volume': {'center point (um)': {'x': 58.075877109272604,
'y': 31.978375270573267,
'z': 0},
'cereal_class_version': 1,
'pixel time (ms)': 0.5,
'scan axes': [{'axis': 0,
'cereal_class_version': 1,
'num of pixels': 240,
'pixel size (nm)': 150,
'scan time (ms)': 0,
'scan width (um)': 36.07468112612217}]}}
image_metadata = ImageMetadata.from_dataset(json)
res = image_metadata.resolution
assert np.isclose(res[0], 1e7 / 150)
assert np.isclose(res[1], 1e7 / 150)
assert res[2] == 'CENTIMETER'
assert np.isclose(image_metadata.metadata['PixelTime'], .0005)
assert image_metadata.metadata['PixelTimeUnit'] == 's'
def test_timestamps_image():
infowave = np.array([0, 1, 0, 1, 1, 0, 2, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 2])
time = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
line_stamps = line_timestamps_image(time, infowave, 5)
assert line_stamps.shape == (1,)
assert np.all(line_stamps == [1])
line_stamps = line_timestamps_image(time, infowave, 2)
assert line_stamps.shape == (2,)
assert np.all(line_stamps == [1, 15])
def test_reconstruct():
infowave = np.array([0, 1, 0, 1, 1, 0, 2, 1, 0, 1, 0, 0, 1, 2, 1, 1, 1, 2])
the_data = np.array([1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
image = reconstruct_image(the_data, infowave, 5)
assert image.shape == (1, 5)
assert np.all(image == [4, 8, 12, 0, 0])
image = reconstruct_image(the_data, infowave, 2)
assert image.shape == (2, 2)
assert np.all(image == [[4, 8], [12, 0]])
def test_reconstruct_multiframe():
size = 100
infowave = np.ones(size)
infowave[9::10] = 2
the_data = np.arange(size)
assert reconstruct_image(the_data, infowave, 5).shape == (2, 5)
assert reconstruct_image(the_data, infowave, 2).shape == (5, 2)
assert reconstruct_image(the_data, infowave, 1).shape == (10, 1)
assert reconstruct_image(the_data, infowave, 2, 2).shape == (3, 2, 2)
assert reconstruct_image(the_data, infowave, 2, 3).shape == (2, 3, 2)
assert reconstruct_image(the_data, infowave, 2, 5).shape == (5, 2)
assert reconstruct_num_frames(infowave, 2, 2) == 3
assert reconstruct_num_frames(infowave, 2, 3) == 2
assert reconstruct_num_frames(infowave, 2, 5) == 1
def test_int_tiff(tmpdir):
def grab_tags(file):
import tifffile
from ast import literal_eval
with tifffile.TiffFile(file) as tif:
tiff_tags = {}
for tag in tif.pages[0].tags.values():
name, value = tag.name, tag.value
try:
tiff_tags[name] = literal_eval(value)
except (ValueError, SyntaxError):
tiff_tags[name] = value
return tiff_tags
    image16 = np.ones(shape=(10, 10, 3)) * np.iinfo(np.uint16).max
from ctapipe.utils import get_dataset_path
import numpy as np
import pytest
import os
import shutil
import pandas as pd
from lstchain.io.io import dl1_params_lstcam_key, dl2_params_lstcam_key
from lstchain.reco.utils import filter_events
import astropy.units as u
test_dir = 'testfiles'
os.makedirs(test_dir, exist_ok=True)
mc_gamma_testfile = get_dataset_path('gamma_test_large.simtel.gz')
dl1_file = os.path.join(test_dir, 'dl1_gamma_test_large.h5')
dl2_file = os.path.join(test_dir, 'dl2_gamma_test_large.h5')
fake_dl2_proton_file = os.path.join(test_dir, 'dl2_fake_proton.simtel.h5')
fake_dl1_proton_file = os.path.join(test_dir, 'dl1_fake_proton.simtel.h5')
file_model_energy = os.path.join(test_dir, 'reg_energy.sav')
file_model_disp = os.path.join(test_dir, 'reg_disp_vector.sav')
file_model_gh_sep = os.path.join(test_dir, 'cls_gh.sav')
custom_config = {
"events_filters": {
"intensity": [0.3, np.inf],
"width": [0, 10],
"length": [0, 10],
"wl": [0, 1],
"r": [0, 1],
"leakage2_intensity": [0, 1]
},
"tailcut": {
"picture_thresh":6,
"boundary_thresh":2,
"keep_isolated_pixels": True,
"min_number_picture_neighbors": 1
},
"random_forest_regressor_args": {
"max_depth": 5,
"min_samples_leaf": 2,
"n_jobs": 4,
"n_estimators": 15,
},
"random_forest_classifier_args": {
"max_depth": 5,
"min_samples_leaf": 2,
"n_jobs": 4,
"n_estimators": 10,
},
"regression_features": [
"intensity",
"width",
"length",
"x",
"y",
"wl",
"skewness",
"kurtosis",
],
"classification_features": [
"intensity",
"width",
"length",
"x",
"y",
"log_reco_energy",
"reco_disp_dx",
"reco_disp_dy"
],
"allowed_tels": [1, 2, 3, 4],
"image_extractor": "GlobalPeakWindowSum",
"image_extractor_config": {},
"gain_selector": "ThresholdGainSelector",
"gain_selector_config": {
"threshold": 4094
}
}
def test_import_calib():
from lstchain import calib
def test_import_reco():
from lstchain import reco
def test_import_visualization():
from lstchain import visualization
def test_import_lstio():
from lstchain import io
@pytest.mark.run(order=1)
def test_r0_to_dl1():
from lstchain.reco.r0_to_dl1 import r0_to_dl1
infile = mc_gamma_testfile
r0_to_dl1(infile, custom_config=custom_config, output_filename=dl1_file)
def test_get_source_dependent_parameters():
from lstchain.reco.dl1_to_dl2 import get_source_dependent_parameters
dl1_params = pd.read_hdf(dl1_file, key=dl1_params_lstcam_key)
src_dep_df = get_source_dependent_parameters(dl1_params, custom_config)
@pytest.mark.run(order=2)
def test_build_models():
from lstchain.reco.dl1_to_dl2 import build_models
infile = dl1_file
reg_energy, reg_disp, cls_gh = build_models(infile, infile, custom_config=custom_config, save_models=False)
import joblib
joblib.dump(reg_energy, file_model_energy)
joblib.dump(reg_disp, file_model_disp)
joblib.dump(cls_gh, file_model_gh_sep)
@pytest.mark.run(order=3)
def test_apply_models():
from lstchain.reco.dl1_to_dl2 import apply_models
import joblib
dl1 = pd.read_hdf(dl1_file, key=dl1_params_lstcam_key)
dl1 = filter_events(dl1, filters=custom_config["events_filters"])
reg_energy = joblib.load(file_model_energy)
reg_disp = joblib.load(file_model_disp)
reg_cls_gh = joblib.load(file_model_gh_sep)
dl2 = apply_models(dl1, reg_cls_gh, reg_energy, reg_disp, custom_config=custom_config)
dl2.to_hdf(dl2_file, key=dl2_params_lstcam_key)
def produce_fake_dl1_proton_file():
"""
    Produce a fake dl1 proton file by copying the dl1 gamma test file
and changing mc_type
"""
events = pd.read_hdf(dl1_file, key=dl1_params_lstcam_key)
events.mc_type = 101
events.to_hdf(fake_dl1_proton_file, key=dl1_params_lstcam_key)
def produce_fake_dl2_proton_file():
"""
Produce a fake dl2 proton file by copying the dl2 gamma test file
and changing mc_type
"""
events = pd.read_hdf(dl2_file, key=dl2_params_lstcam_key)
events.mc_type = 101
events.to_hdf(fake_dl2_proton_file, key=dl2_params_lstcam_key)
@pytest.mark.run(after='produce_fake_dl2_proton_file')
def test_sensitivity():
from lstchain.mc.sensitivity import find_best_cuts_sensitivity, sensitivity
produce_fake_dl2_proton_file()
nfiles_gammas = 1
nfiles_protons = 1
eb = 10 # Number of energy bins
gb = 11 # Number of gammaness bins
tb = 10 # Number of theta2 bins
obstime = 50 * 3600 * u.s
noff = 2
E, best_sens, result, units, gcut, tcut = find_best_cuts_sensitivity(dl1_file,
dl1_file,
dl2_file,
fake_dl2_proton_file,
1, 1,
eb, gb, tb, noff,
obstime)
E, best_sens, result, units, dl2 = sensitivity(dl1_file,
dl1_file,
dl2_file,
fake_dl2_proton_file,
1, 1,
eb, gcut, tcut * (u.deg ** 2), noff,
obstime)
@pytest.mark.last
def test_clean_test_files():
"""
Function to clean the test files created by the previous test
"""
import shutil
shutil.rmtree(test_dir)
def test_disp_vector():
from lstchain.reco.disp import disp_vector
dx = np.cos(np.pi/3 * np.ones(3))
dy = np.sin(np.pi/3 * np.ones(3))
disp_angle = np.pi/3 * np.ones(3)
    disp_norm = np.ones(3)
import os
import numpy as np
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AllenCahn_2D_FD import allencahn_fullyimplicit
from pySDC.implementations.problem_classes.AllenCahn_2D_FFT import allencahn2d_imex
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.playgrounds.Allen_Cahn.AllenCahn_monitor import monitor
# http://www.personal.psu.edu/qud2/Res/Pre/dz09sisc.pdf
def setup_parameters():
"""
Helper routine to fill in all relevant parameters
Note that this file will be used for all versions of SDC, containing more than necessary for each individual run
Returns:
description (dict)
controller_params (dict)
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-11
level_params['dt'] = 1E-05
level_params['nsweeps'] = [1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = [5]
sweeper_params['QI'] = ['LU']
sweeper_params['initial_guess'] = 'zero'
# This comes as read-in for the problem class
problem_params = dict()
problem_params['nu'] = 2
problem_params['nvars'] = [(128, 128)]
problem_params['eps'] = [0.04]
problem_params['newton_maxiter'] = 100
problem_params['newton_tol'] = 1E-12
problem_params['lin_tol'] = 1E-12
problem_params['lin_maxiter'] = 100
problem_params['radius'] = 0.25
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
# controller_params['hook_class'] = monitor
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = allencahn_fullyimplicit # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
return description, controller_params
def setup_parameters_FFT():
"""
Helper routine to fill in all relevant parameters
Note that this file will be used for all versions of SDC, containing more than necessary for each individual run
Returns:
description (dict)
controller_params (dict)
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1E-11
level_params['dt'] = 1E-04
level_params['nsweeps'] = [1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = CollGaussRadau_Right
sweeper_params['num_nodes'] = [5]
sweeper_params['QI'] = ['LU']
sweeper_params['initial_guess'] = 'zero'
# This comes as read-in for the problem class
problem_params = dict()
problem_params['nu'] = 2
problem_params['nvars'] = [(128, 128)]
problem_params['eps'] = [0.04]
problem_params['newton_maxiter'] = 100
problem_params['newton_tol'] = 1E-12
problem_params['lin_tol'] = 1E-12
problem_params['lin_maxiter'] = 100
problem_params['radius'] = 0.25
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
controller_params['hook_class'] = monitor
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = allencahn2d_imex # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
return description, controller_params
def run_reference(Tend):
"""
Routine to run particular SDC variant
Args:
Tend (float): end time for dumping
"""
# load (incomplete) default parameters
description, controller_params = setup_parameters_FFT()
# setup parameters "in time"
t0 = 0
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params,
description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by variant (number of iterations)
filtered_stats = filter_stats(stats, type='niter')
# convert filtered statistics to list of iterations count, sorted by process
iter_counts = sort_stats(filtered_stats, sortby='time')
# compute and print statistics
    niters = np.array([item[1] for item in iter_counts])
'''
###############################################################################
"MajoranaNanowire" Python3 Module
v 1.0 (2020)
Created by <NAME> (2018)
###############################################################################
"H_class/Lutchyn_Oreg/builders" submodule
This sub-package builds Lutchyn-Oreg Hamiltonians.
###############################################################################
'''
#%%############################################################################
######################## Required Packages ############################
###############################################################################
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
import scipy.constants as cons
from MajoranaNanowires.Functions import diagonal
#%%
def LO_1D_builder(N,dis,m_eff,mu,B,aR,d, space='position', k_vec=np.nan ,sparse='no'):
"""
    1D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 1D
    Lutchyn-Oreg chain with superconductivity.
Parameters
----------
N: int or arr
Number of sites.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is an array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is an array, each element is the on-site
chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
        the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
an array with the on-site Rashba couplings in the direction i.
d: float or arr
        Superconductor pairing amplitude.
        -If d is a float, d is the superconducting pairing amplitude,
        with the same value in every site.
        -If d is an array, each element of the array is the on-site
        superconducting pairing amplitude.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
        Sparsity of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones(N)
if np.isscalar(mu):
mu = mu * np.ones(N)
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRy=np.zeros(N)
aRz=aR*np.ones(N)/(2*dis)
elif np.ndim(aR)==1:
if len(aR)==3:
aRy=aR[1]*np.ones(N)/(2*dis)
aRz=aR[2]*np.ones(N)/(2*dis)
else:
aRy=np.zeros(N)
aRz=aR/(2*dis)
else:
aRy=aR[1]/(2*dis)
aRz=aR[2]/(2*dis)
if np.isscalar(d):
d = d * np.ones(N)
if space=='momentum':
n_k=len(k_vec)
#Obtain the hopping and on-site energies:
t=cons.hbar**2/(2*m_eff*cons.m_e*(dis*1e-9)**2)/cons.e*1e3
e = 2 * t - mu
##Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(4 * N), int(4 * N)),dtype=complex)
elif sparse=='yes':
H=scipy.sparse.dok_matrix((int(4*N),int(4*N)),dtype=complex)
t, aRy, Bz = np.repeat(t,2), np.repeat(aRy,2), np.repeat(Bz,2)
Bz[1::2], aRy[1::2] = -Bz[::2], -aRy[::2]
for i in range(2):
H[diagonal(2*N*(i+1),init=2*N*i,k=1,step=2)], H[diagonal(2*N*(i+1),init=2*N*i,k=-1,step=2)] = (-1)**(i)*Bx-1j*By, (-1)**(i)*Bx+1j*By
H[diagonal(2*N*(i+1),init=2*N*i)] = (-1)**i*(Bz+np.repeat(e,2))
H[diagonal(2*N*(i+1),init=2*N*i,k=-2)] = -1*(-1)**i*t[2::]+1j*aRy[2::]
H[diagonal(2*N*(i+1),init=2*N*i,k=2)] = -1*(-1)**i*t[2::]-1j*aRy[2::]
H[diagonal(2*N*(i+1),k=1,step=2,init=1+2*N*i)] += -1*(-1)**i*aRz[1::]
H[diagonal(2*N*(i+1),k=-1,step=2,init=1+2*N*i)] += -1*(-1)**i*aRz[1::]
H[diagonal(2*N*(i+1),init=2*N*i,k=3,step=2)] += (-1)**i*aRz[1::]
H[diagonal(2*N*(i+1),init=2*N*i,k=-3,step=2)] += (-1)**i*aRz[1::]
H[diagonal(4*N,k=2*N+1,step=2)], H[diagonal(4*N,k=-2*N-1,step=2)] = -np.conj(d), -d
H[diagonal(4*N,k=2*N-1,step=2,init=1)], H[diagonal(4*N,k=-2*N+1,step=2,init=1)] = np.conj(d), d
#Build it in momentum space if required:
if space=='momentum':
if sparse=='no':
H_k = np.zeros((int(4 * N), int(4 * N), int(n_k)),dtype=complex)
for i in range(n_k):
H_k[:,:,i]=H
H_k[2 * (N - 1):2 * (N - 1) + 2, 0: 2,i] += np.array([[-t[2]-1j*aRy[2], aRz[1]], [-aRz[1], -t[2]+1j*aRy[2]]])*np.exp(-1j*k_vec[i]*N)
H_k[2 * (N - 1)+2*N:2 * (N - 1) + 2+2*N, 2*N: 2+2*N,i] += -np.array([[-t[2]+1j*aRy[2], aRz[1]], [-aRz[1], -t[2]-1j*aRy[2]]])*np.exp(1j*k_vec[i]*N)
H_k[0: 2, 2 * (N - 1):2 * (N - 1) + 2,i] += np.array([[-t[2]+1j*aRy[2], -aRz[1]], [aRz[1], -t[2]-1j*aRy[2]]])*np.exp(1j*k_vec[i]*N)
H_k[2*N: 2+2*N, 2 * (N - 1)+2*N:2 * (N - 1) + 2+2*N,i] += -np.array([[-t[2]-1j*aRy[2], -aRz[1]], [aRz[1], -t[2]+1j*aRy[2]]])*np.exp(-1j*k_vec[i]*N)
return (H_k)
elif sparse=='yes':
return(H)
else:
return (H)
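# Illustrative sketch (assumed parameter values): build a short real-space wire and obtain its
# Bogoliubov-de Gennes spectrum by dense diagonalization of the 4N x 4N Hamiltonian.
def _demo_LO_1D_spectrum():
    N, dis = 100, 10                  # 100 sites, 10 nm lattice spacing
    H = LO_1D_builder(N, dis, m_eff=0.023, mu=0.5, B=1.0, aR=20.0, d=0.25,
                      space='position', sparse='no')
    energies = np.linalg.eigvalsh(H)  # eigenvalues (meV) come in particle-hole pairs
    return np.sort(np.abs(energies))[:10]  # ten lowest |E| quasi-particle energies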
#%%
def LO_1D_builder_NoSC(N,dis,m_eff,mu,B,aR, space='position', k_vec=np.nan ,sparse='no'):
"""
    1D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 1D
    Lutchyn-Oreg chain without superconductivity.
Parameters
----------
N: int or arr
Number of sites.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is an array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is an array, each element is the on-site
chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
        the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
an array with the on-site Rashba couplings in the direction i.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
        Sparsity of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones(N)
if np.isscalar(mu):
mu = mu * np.ones(N)
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRy=np.zeros(N)
aRz=aR*np.ones(N)/(2*dis)
elif np.ndim(aR)==1:
if len(aR)==3:
aRy=aR[1]*np.ones(N)/(2*dis)
aRz=aR[2]*np.ones(N)/(2*dis)
else:
aRy=np.zeros(N)
aRz=aR/(2*dis)
else:
aRy=aR[1]/(2*dis)
aRz=aR[2]/(2*dis)
if space=='momentum':
n_k=len(k_vec)
#Obtain the hopping and the on-site energies:
t=cons.hbar**2/(2*m_eff*cons.m_e*(dis*1e-9)**2)/cons.e*1e3
e = 2 * t - mu
##Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(2 * N), int(2 * N)),dtype=complex)
elif sparse=='yes':
H=scipy.sparse.dok_matrix((int(2*N),int(2*N)),dtype=complex)
Bz,Bx,By=np.repeat(Bz,2),np.repeat(Bx,2), 1j*np.repeat(By,2)
Bx[1::2], By[1::2], Bz[1::2] = 0, 0, -Bz[::2]
H[diagonal(2*N,k=1)], H[diagonal(2*N,k=-1)] = Bx[:-1]-By[:-1], Bx[:-1]+By[:-1]
H[diagonal(2*N)]=Bz+np.repeat(e,2)
t=-np.repeat(t,2)
aRy=np.repeat(aRy,2)
aRy[1::2]= -aRy[::2]
H[diagonal(2*N,k=-2)], H[diagonal(2*N,k=2)] = t[2::]+1j*aRy[2::], t[2::]-1j*aRy[2::]
H[diagonal(2*N,k=1,step=2,init=1)] += -aRz[1::]
H[diagonal(2*N,k=-1,step=2,init=1)] += -aRz[1::]
H[diagonal(2*N,k=3,step=2)] += aRz[1::]
H[diagonal(2*N,k=-3,step=2)] += aRz[1::]
#Build it in momentum space if required:
if space=='momentum':
if sparse=='no':
H_k = np.zeros((int(2 * N), int(2 * N), int(n_k)),dtype=complex)
for i in range(n_k):
H_k[:,:,i]=H
H_k[2 * (N - 1):2 * (N - 1) + 2, 0: 2,i] += np.array([[-t[2]-1j*aRy[2], aRz[1]], [-aRz[1], -t[2]+1j*aRy[2]]])*np.exp(-1j*k_vec[i]*N)
H_k[0: 2, 2 * (N - 1):2 * (N - 1) + 2,i] += np.array([[-t[2]+1j*aRy[2], -aRz[1]], [aRz[1], -t[2]-1j*aRy[2]]])*np.exp(1j*k_vec[i]*N)
return (H_k)
elif sparse=='yes':
return (H)
else:
return (H)
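# Illustrative sketch (assumed parameter values): normal-state band structure of the unit cell
# using the momentum-space option. The k-points are chosen so the Bloch phase k*N spans (-pi, pi).
def _demo_LO_1D_bands():
    N, dis = 50, 10
    k_vec = np.linspace(-np.pi, np.pi, 101) / N
    H_k = LO_1D_builder_NoSC(N, dis, m_eff=0.023, mu=0.5, B=1.0, aR=20.0,
                             space='momentum', k_vec=k_vec, sparse='no')
    bands = np.array([np.linalg.eigvalsh(H_k[:, :, i]) for i in range(len(k_vec))])
    return bands  # shape (n_k, 2*N)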
#%%
def LO_2D_builder(N,dis,m_eff,mu,B,aR, d, space='position', k_vec=np.nan ,sparse='no'):
"""
    2D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 2D
    Lutchyn-Oreg chain with superconductivity.
Parameters
----------
N: arr
Number of sites in each direction.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is a 2D array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is a 2D array, each element is the
on-site chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
        the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
a 2D array with the on-site Rashba couplings in the direction i.
d: float or arr
        Superconductor pairing amplitude.
        -If d is a float, d is the superconducting pairing amplitude,
        with the same value in every site.
        -If d is a 2D array, each element of the array is the on-site
        superconducting pairing amplitude.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
        Sparsity of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Obtain the dimensions:
Ny, Nz = N[0], N[1]
if np.ndim(dis)==0:
dis_y, dis_z = dis, dis
else:
dis_y, dis_z = dis[0], dis[1]
m = 4 * Ny * Nz
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones((Ny,Nz))
if np.isscalar(mu):
mu = mu * np.ones((Ny,Nz))
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
elif np.ndim(aR)==1:
if len(aR)==3:
aRx=aR[0]*np.ones(N)
aRy=aR[1]*np.ones(N)
aRz=aR[2]*np.ones(N)
else:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
else:
aRx=aR[0]
aRy=aR[1]
aRz=aR[2]
if np.isscalar(d):
d = d * np.ones(N)
#Obtain the eigenenergies:
ty=cons.hbar**2/(2*(m_eff[1::,:]+m_eff[:-1,:])/2*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3
tz=cons.hbar**2/(2*(m_eff[:,1::]+m_eff[:,:-1])/2*cons.m_e*(dis_z*1e-9)**2)/cons.e*1e3
e = - mu
e += np.append(2*ty[0,:].reshape(1,Nz),np.append(ty[1::,:]+ty[:-1,:],2*ty[-1,:].reshape(1,Nz),axis=0),axis=0)
e += np.append(2*tz[:,0].reshape(Ny,1),np.append(tz[:,1::]+tz[:,:-1],2*tz[:,-1].reshape(Ny,1),axis=1),axis=1)
#Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(m), int(m)),dtype=complex)
elif sparse=='yes':
H = scipy.sparse.dok_matrix((int(m),int(m)),dtype=complex)
e,d,Bx,By,Bz=e.flatten(),d.flatten(),Bx.flatten(),By.flatten(),Bz.flatten()
Bz=np.repeat(Bz,2)
Bz[1::2] = -Bz[::2]
ty, aRx_ky, aRz_ky = np.repeat(ty.flatten(),2), np.repeat(((aRx[1::,:]+aRx[:-1,:])/(4*dis_y)).flatten(),2), ((aRz[1::,:]+aRz[:-1,:])/(4*dis_y)).flatten()
tz, aRx_kz, aRy_kz = np.repeat(tz.flatten(),2), ((aRx[:,1::]+aRx[:,:-1])/(4*dis_z)).flatten(), ((aRy[:,1::]+aRy[:,:-1])/(4*dis_z)).flatten()
aRx_ky[1::2] = -aRx_ky[::2]
tz, aRx_kz, aRy_kz=np.insert(tz,np.repeat(np.arange(2*(Nz-1),2*(Nz-1)*Ny,2*(Nz-1)),2),np.zeros(2*(Ny-1))), np.insert(aRx_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1))), np.insert(aRy_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1)))
for i in range(2):
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=1,step=2)], H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-1,step=2)] = (-1)**(i)*Bx-1j*By, (-1)**(i)*Bx+1j*By
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i)] = (-1)**(i)*(np.repeat(e,2) + Bz)
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=2*Nz)] = -1*(-1)**(i)*ty+1j*aRx_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-2*Nz)] = -1*(-1)**(i)*ty-1j*aRx_ky
H[diagonal(int(m/2)*(i+1),k=2*Nz-1,step=2,init=1+int(m/2)*i)] += -1j*aRz_ky
H[diagonal(int(m/2)*(i+1),k=-2*Nz+1,step=2,init=1+int(m/2)*i)] += 1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=1+2*Nz,step=2)] += -1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-1-2*Nz,step=2)] += 1j*aRz_ky
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=2)] = -1*(-1)**(i)*tz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-2)] = -1*(-1)**(i)*tz
H[diagonal(int(m/2)*(i+1),k=1,step=2,init=1+int(m/2)*i)] += (-1)**(i)*aRx_kz+1j*aRy_kz
H[diagonal(int(m/2)*(i+1),k=-1,step=2,init=1+int(m/2)*i)] += (-1)**(i)*aRx_kz-1j*aRy_kz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=3,step=2)] += -1*(-1)**(i)*aRx_kz+1j*aRy_kz
H[diagonal(int(m/2)*(i+1),init=int(m/2)*i,k=-3,step=2)] += -1*(-1)**(i)*aRx_kz-1j*aRy_kz
H[diagonal(m,k=int(m/2)+1,step=2)], H[diagonal(m,k=-int(m/2)-1,step=2)] = -np.conj(d), -d
H[diagonal(m,k=int(m/2)-1,step=2,init=1)], H[diagonal(m,k=-int(m/2)+1,step=2,init=1)] = np.conj(d), d
return (H)
#%%
def LO_2D_builder_NoSC(N,dis,m_eff,mu,B,aR, space='position', k_vec=np.nan ,sparse='no'):
"""
    2D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 2D
    Lutchyn-Oreg chain without superconductivity.
Parameters
----------
N: arr
Number of sites in each direction.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is a 2D array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is a 2D array, each element is the
on-site chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
        the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
a 2D array with the on-site Rashba couplings in the direction i.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
        Sparsity of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Obtain the dimensions:
Ny, Nz = N[0], N[1]
if np.ndim(dis)==0:
dis_y, dis_z = dis, dis
else:
dis_y, dis_z = dis[0], dis[1]
m = 2 * Ny * Nz
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones((Ny,Nz))
if np.isscalar(mu):
mu = mu * np.ones((Ny,Nz))
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
if np.ndim(aR)==0:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
elif np.ndim(aR)==1:
if len(aR)==3:
aRx=aR[0]*np.ones(N)
aRy=aR[1]*np.ones(N)
aRz=aR[2]*np.ones(N)
else:
aRx=np.zeros(N)
aRy=np.zeros(N)
aRz=aR*np.ones(N)
else:
aRx=aR[0]
aRy=aR[1]
aRz=aR[2]
#Obtain the eigenenergies:
ty=cons.hbar**2/(2*(m_eff[1::,:]+m_eff[:-1,:])/2*cons.m_e*(dis_y*1e-9)**2)/cons.e*1e3
tz=cons.hbar**2/(2*(m_eff[:,1::]+m_eff[:,:-1])/2*cons.m_e*(dis_z*1e-9)**2)/cons.e*1e3
e = - mu
e += np.append(2*ty[0,:].reshape(1,Nz),np.append(ty[1::,:]+ty[:-1,:],2*ty[-1,:].reshape(1,Nz),axis=0),axis=0)
e += np.append(2*tz[:,0].reshape(Ny,1),np.append(tz[:,1::]+tz[:,:-1],2*tz[:,-1].reshape(Ny,1),axis=1),axis=1)
#Build the Hamiltonian:
if sparse=='no':
H = np.zeros((int(m), int(m)),dtype=complex)
elif sparse=='yes':
H = scipy.sparse.dok_matrix((int(m),int(m)),dtype=complex)
e,Bx,By,Bz=e.flatten(),Bx.flatten(),By.flatten(),Bz.flatten()
Bz=np.repeat(Bz,2)
Bz[1::2] = -Bz[::2]
ty, aRx_ky, aRz_ky = np.repeat(ty.flatten(),2), np.repeat(((aRx[1::,:]+aRx[:-1,:])/(4*dis_y)).flatten(),2), ((aRz[1::,:]+aRz[:-1,:])/(4*dis_y)).flatten()
tz, aRx_kz, aRy_kz = np.repeat(tz.flatten(),2), ((aRx[:,1::]+aRx[:,:-1])/(4*dis_z)).flatten(), ((aRy[:,1::]+aRy[:,:-1])/(4*dis_z)).flatten()
aRx_ky[1::2] = -aRx_ky[::2]
H[diagonal(m,k=1,step=2)], H[diagonal(m,k=-1,step=2)] = Bx-1j*By, Bx+1j*By
H[diagonal(m)] = np.repeat(e,2) + Bz
H[diagonal(m,k=2*Nz)] = -ty+1j*aRx_ky
H[diagonal(m,k=-2*Nz)] = -ty-1j*aRx_ky
H[diagonal(m,k=2*Nz-1,step=2,init=1)] += -1j*aRz_ky
H[diagonal(m,k=-2*Nz+1,step=2,init=1)] += 1j*aRz_ky
H[diagonal(m,k=1+2*Nz,step=2)] += -1j*aRz_ky
H[diagonal(m,k=-1-2*Nz,step=2)] += 1j*aRz_ky
tz, aRx_kz, aRy_kz=np.insert(tz,np.repeat(np.arange(2*(Nz-1),2*(Nz-1)*Ny,2*(Nz-1)),2),np.zeros(2*(Ny-1))), np.insert(aRx_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1))), np.insert(aRy_kz,np.arange((Nz-1),(Nz-1)*Ny,(Nz-1)),np.zeros((Ny-1)))
H[diagonal(m,k=2)] = -tz
H[diagonal(m,k=-2)] = -tz
H[diagonal(m,k=1,step=2,init=1)] += aRx_kz+1j*aRy_kz
H[diagonal(m,k=-1,step=2,init=1)] += aRx_kz-1j*aRy_kz
H[diagonal(m,k=3,step=2)] += -aRx_kz+1j*aRy_kz
H[diagonal(m,k=-3,step=2)] += -aRx_kz-1j*aRy_kz
return (H)
#%%
def LO_3D_builder(N,dis,m_eff,mu,B,aR,d, space='position', k_vec=np.nan ,sparse='yes'):
"""
    3D Lutchyn-Oreg Hamiltonian builder. It obtains the Hamiltonian for a 3D
    Lutchyn-Oreg chain with superconductivity.
Parameters
----------
N: arr
Number of sites in each direction.
dis: int or arr
Distance (in nm) between sites.
m_eff: int or arr
Effective mass. If it is a 3D array, each element is the on-site
effective mass.
mu: float or arr
Chemical potential. If it is a 3D array, each element is the
on-site chemical potential
B: float or arr
Zeeman splitting. If it is an array, each element is the Zeeman
splitting in each direction.
aR: float or arr
Rashba coupling.
-If aR is a float, aR is the Rashba coupling along the z-direction,
with the same value in every site.
-If aR is a 1D array with length=3, each element of the array is
        the Rashba coupling in each direction.
-If aR is an array of arrays (3 x N), each element of aR[i] is
a 3D array with the on-site Rashba couplings in the direction i.
d: float or arr
        Superconductor pairing amplitude.
        -If d is a float, d is the superconducting pairing amplitude,
        with the same value in every site.
        -If d is a 3D array, each element of the array is the on-site
        superconducting pairing amplitude.
space: {"position","momentum"}
Space in which the Hamiltonian is built. "position" means
real-space (r-space). In this case the boundary conditions are open.
On the other hand, "momentum" means reciprocal space (k-space). In
this case the built Hamiltonian corresponds to the Hamiltonian of
the unit cell, with periodic boundary conditions along the
x-direction.
k_vec: arr
If space=='momentum', k_vec is the (discretized) momentum vector,
usually in the First Brillouin Zone.
sparse: {"yes","no"}
        Sparsity of the built Hamiltonian. "yes" builds a dok_sparse matrix,
while "no" builds a dense matrix.
Returns
-------
H: arr
Hamiltonian matrix.
"""
#Obtain the dimensions:
Nx, Ny, Nz = N[0], N[1], N[2]
if np.ndim(dis)==0:
dis_x, dis_y, dis_z = dis, dis, dis
else:
dis_x, dis_y, dis_z = dis[0], dis[1], dis[2]
m = 4 * Nx * Ny * Nz
#Make sure that the onsite parameters are arrays:
if np.isscalar(m_eff):
m_eff = m_eff * np.ones((Nx,Ny,Nz))
if np.isscalar(mu):
mu = mu * np.ones((Nx,Ny,Nz))
if np.isscalar(B):
Bx=B
By=0
Bz=0
Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
elif np.ndim(B)==1 and len(B)==3:
Bx=B[0]
By=B[1]
Bz=B[2]
    Bx,By,Bz=Bx*np.ones(N),By*np.ones(N),Bz*np.ones(N)
# Copyright (c) 2012-2020 Jicamarca Radio Observatory
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
"""Spectra processing Unit and operations
Here you will find the processing unit `SpectraProc` and several operations
to work with Spectra data type
"""
import time
import itertools
import numpy
from schainpy.model.proc.jroproc_base import ProcessingUnit, MPDecorator, Operation
from schainpy.model.data.jrodata import Spectra
from schainpy.model.data.jrodata import hildebrand_sekhon
from schainpy.utils import log
class SpectraProc(ProcessingUnit):
def __init__(self):
ProcessingUnit.__init__(self)
self.buffer = None
self.firstdatatime = None
self.profIndex = 0
self.dataOut = Spectra()
self.id_min = None
self.id_max = None
        self.setupReq = False  # to be added to all the processing units
def __updateSpecFromVoltage(self):
self.dataOut.timeZone = self.dataIn.timeZone
self.dataOut.dstFlag = self.dataIn.dstFlag
self.dataOut.errorCount = self.dataIn.errorCount
self.dataOut.useLocalTime = self.dataIn.useLocalTime
try:
self.dataOut.processingHeaderObj = self.dataIn.processingHeaderObj.copy()
except:
pass
self.dataOut.radarControllerHeaderObj = self.dataIn.radarControllerHeaderObj.copy()
self.dataOut.systemHeaderObj = self.dataIn.systemHeaderObj.copy()
self.dataOut.channelList = self.dataIn.channelList
self.dataOut.heightList = self.dataIn.heightList
self.dataOut.dtype = numpy.dtype([('real', '<f4'), ('imag', '<f4')])
self.dataOut.nProfiles = self.dataOut.nFFTPoints
self.dataOut.flagDiscontinuousBlock = self.dataIn.flagDiscontinuousBlock
self.dataOut.utctime = self.firstdatatime
self.dataOut.flagDecodeData = self.dataIn.flagDecodeData
self.dataOut.flagDeflipData = self.dataIn.flagDeflipData
self.dataOut.flagShiftFFT = False
self.dataOut.nCohInt = self.dataIn.nCohInt
self.dataOut.nIncohInt = 1
self.dataOut.windowOfFilter = self.dataIn.windowOfFilter
self.dataOut.frequency = self.dataIn.frequency
self.dataOut.realtime = self.dataIn.realtime
self.dataOut.azimuth = self.dataIn.azimuth
self.dataOut.zenith = self.dataIn.zenith
self.dataOut.beam.codeList = self.dataIn.beam.codeList
self.dataOut.beam.azimuthList = self.dataIn.beam.azimuthList
self.dataOut.beam.zenithList = self.dataIn.beam.zenithList
def __getFft(self):
"""
        Converts Voltage values to Spectra
Affected:
self.dataOut.data_spc
self.dataOut.data_cspc
self.dataOut.data_dc
self.dataOut.heightList
self.profIndex
self.buffer
self.dataOut.flagNoData
"""
fft_volt = numpy.fft.fft(
self.buffer, n=self.dataOut.nFFTPoints, axis=1)
fft_volt = fft_volt.astype(numpy.dtype('complex'))
dc = fft_volt[:, 0, :]
        # compute the self-spectra
fft_volt = numpy.fft.fftshift(fft_volt, axes=(1,))
spc = fft_volt * numpy.conjugate(fft_volt)
spc = spc.real
blocksize = 0
blocksize += dc.size
blocksize += spc.size
cspc = None
pairIndex = 0
if self.dataOut.pairsList != None:
            # compute the cross-spectra
cspc = numpy.zeros(
(self.dataOut.nPairs, self.dataOut.nFFTPoints, self.dataOut.nHeights), dtype='complex')
for pair in self.dataOut.pairsList:
if pair[0] not in self.dataOut.channelList:
raise ValueError("Error getting CrossSpectra: pair 0 of %s is not in channelList = %s" % (
str(pair), str(self.dataOut.channelList)))
if pair[1] not in self.dataOut.channelList:
raise ValueError("Error getting CrossSpectra: pair 1 of %s is not in channelList = %s" % (
str(pair), str(self.dataOut.channelList)))
cspc[pairIndex, :, :] = fft_volt[pair[0], :, :] * \
numpy.conjugate(fft_volt[pair[1], :, :])
pairIndex += 1
blocksize += cspc.size
self.dataOut.data_spc = spc
self.dataOut.data_cspc = cspc
self.dataOut.data_dc = dc
self.dataOut.blockSize = blocksize
self.dataOut.flagShiftFFT = False
def run(self, nProfiles=None, nFFTPoints=None, pairsList=None, ippFactor=None, shift_fft=False):
if self.dataIn.type == "Spectra":
self.dataOut.copy(self.dataIn)
if shift_fft:
                # shift a given number of positions to the right along the frequency axis
shift = int(self.dataOut.nFFTPoints/2)
self.dataOut.data_spc = numpy.roll(self.dataOut.data_spc, shift , axis=1)
if self.dataOut.data_cspc is not None:
                    # shift a given number of positions to the right along the frequency axis
self.dataOut.data_cspc = numpy.roll(self.dataOut.data_cspc, shift, axis=1)
if pairsList:
self.__selectPairs(pairsList)
elif self.dataIn.type == "Voltage":
self.dataOut.flagNoData = True
if nFFTPoints == None:
raise ValueError("This SpectraProc.run() need nFFTPoints input variable")
if nProfiles == None:
nProfiles = nFFTPoints
if ippFactor == None:
self.dataOut.ippFactor = 1
self.dataOut.nFFTPoints = nFFTPoints
if self.buffer is None:
self.buffer = numpy.zeros((self.dataIn.nChannels,
nProfiles,
self.dataIn.nHeights),
dtype='complex')
if self.dataIn.flagDataAsBlock:
nVoltProfiles = self.dataIn.data.shape[1]
if nVoltProfiles == nProfiles:
self.buffer = self.dataIn.data.copy()
self.profIndex = nVoltProfiles
elif nVoltProfiles < nProfiles:
if self.profIndex == 0:
self.id_min = 0
self.id_max = nVoltProfiles
self.buffer[:, self.id_min:self.id_max,
:] = self.dataIn.data
self.profIndex += nVoltProfiles
self.id_min += nVoltProfiles
self.id_max += nVoltProfiles
else:
raise ValueError("The type object %s has %d profiles, it should just has %d profiles" % (
self.dataIn.type, self.dataIn.data.shape[1], nProfiles))
self.dataOut.flagNoData = True
else:
self.buffer[:, self.profIndex, :] = self.dataIn.data.copy()
self.profIndex += 1
if self.firstdatatime == None:
self.firstdatatime = self.dataIn.utctime
if self.profIndex == nProfiles:
self.__updateSpecFromVoltage()
if pairsList == None:
self.dataOut.pairsList = [pair for pair in itertools.combinations(self.dataOut.channelList, 2)]
else:
self.dataOut.pairsList = pairsList
self.__getFft()
self.dataOut.flagNoData = False
self.firstdatatime = None
self.profIndex = 0
else:
raise ValueError("The type of input object '%s' is not valid".format(
self.dataIn.type))
def __selectPairs(self, pairsList):
if not pairsList:
return
pairs = []
pairsIndex = []
for pair in pairsList:
if pair[0] not in self.dataOut.channelList or pair[1] not in self.dataOut.channelList:
continue
pairs.append(pair)
pairsIndex.append(pairs.index(pair))
self.dataOut.data_cspc = self.dataOut.data_cspc[pairsIndex]
self.dataOut.pairsList = pairs
return
def selectFFTs(self, minFFT, maxFFT ):
"""
        Selects a block of data based on a set of FFT point values within the range
        minFFT <= FFT <= maxFFT
"""
if (minFFT > maxFFT):
raise ValueError("Error selecting heights: Height range (%d,%d) is not valid" % (minFFT, maxFFT))
if (minFFT < self.dataOut.getFreqRange()[0]):
minFFT = self.dataOut.getFreqRange()[0]
if (maxFFT > self.dataOut.getFreqRange()[-1]):
maxFFT = self.dataOut.getFreqRange()[-1]
minIndex = 0
maxIndex = 0
FFTs = self.dataOut.getFreqRange()
inda = numpy.where(FFTs >= minFFT)
indb = numpy.where(FFTs <= maxFFT)
try:
minIndex = inda[0][0]
except:
minIndex = 0
try:
maxIndex = indb[0][-1]
except:
maxIndex = len(FFTs)
self.selectFFTsByIndex(minIndex, maxIndex)
return 1
def getBeaconSignal(self, tauindex=0, channelindex=0, hei_ref=None):
newheis = numpy.where(
self.dataOut.heightList > self.dataOut.radarControllerHeaderObj.Taus[tauindex])
if hei_ref != None:
newheis = numpy.where(self.dataOut.heightList > hei_ref)
minIndex = min(newheis[0])
maxIndex = max(newheis[0])
data_spc = self.dataOut.data_spc[:, :, minIndex:maxIndex + 1]
heightList = self.dataOut.heightList[minIndex:maxIndex + 1]
        # determine indices
nheis = int(self.dataOut.radarControllerHeaderObj.txB /
(self.dataOut.heightList[1] - self.dataOut.heightList[0]))
avg_dB = 10 * \
numpy.log10(numpy.sum(data_spc[channelindex, :, :], axis=0))
beacon_dB = numpy.sort(avg_dB)[-nheis:]
beacon_heiIndexList = []
for val in avg_dB.tolist():
if val >= beacon_dB[0]:
beacon_heiIndexList.append(avg_dB.tolist().index(val))
#data_spc = data_spc[:,:,beacon_heiIndexList]
data_cspc = None
if self.dataOut.data_cspc is not None:
data_cspc = self.dataOut.data_cspc[:, :, minIndex:maxIndex + 1]
#data_cspc = data_cspc[:,:,beacon_heiIndexList]
data_dc = None
if self.dataOut.data_dc is not None:
data_dc = self.dataOut.data_dc[:, minIndex:maxIndex + 1]
#data_dc = data_dc[:,beacon_heiIndexList]
self.dataOut.data_spc = data_spc
self.dataOut.data_cspc = data_cspc
self.dataOut.data_dc = data_dc
self.dataOut.heightList = heightList
self.dataOut.beacon_heiIndexList = beacon_heiIndexList
return 1
def selectFFTsByIndex(self, minIndex, maxIndex):
"""
"""
if (minIndex < 0) or (minIndex > maxIndex):
raise ValueError("Error selecting heights: Index range (%d,%d) is not valid" % (minIndex, maxIndex))
if (maxIndex >= self.dataOut.nProfiles):
maxIndex = self.dataOut.nProfiles-1
#Spectra
data_spc = self.dataOut.data_spc[:,minIndex:maxIndex+1,:]
data_cspc = None
if self.dataOut.data_cspc is not None:
data_cspc = self.dataOut.data_cspc[:,minIndex:maxIndex+1,:]
data_dc = None
if self.dataOut.data_dc is not None:
data_dc = self.dataOut.data_dc[minIndex:maxIndex+1,:]
self.dataOut.data_spc = data_spc
self.dataOut.data_cspc = data_cspc
self.dataOut.data_dc = data_dc
self.dataOut.ippSeconds = self.dataOut.ippSeconds*(self.dataOut.nFFTPoints / numpy.shape(data_cspc)[1])
self.dataOut.nFFTPoints = numpy.shape(data_cspc)[1]
self.dataOut.profilesPerBlock = numpy.shape(data_cspc)[1]
return 1
def getNoise(self, minHei=None, maxHei=None, minVel=None, maxVel=None):
        # height range validation
if minHei == None:
minHei = self.dataOut.heightList[0]
if maxHei == None:
maxHei = self.dataOut.heightList[-1]
if (minHei < self.dataOut.heightList[0]) or (minHei > maxHei):
print('minHei: %.2f is out of the heights range' % (minHei))
print('minHei is setting to %.2f' % (self.dataOut.heightList[0]))
minHei = self.dataOut.heightList[0]
if (maxHei > self.dataOut.heightList[-1]) or (maxHei < minHei):
print('maxHei: %.2f is out of the heights range' % (maxHei))
print('maxHei is setting to %.2f' % (self.dataOut.heightList[-1]))
maxHei = self.dataOut.heightList[-1]
        # velocity range validation
velrange = self.dataOut.getVelRange(1)
if minVel == None:
minVel = velrange[0]
if maxVel == None:
maxVel = velrange[-1]
if (minVel < velrange[0]) or (minVel > maxVel):
print('minVel: %.2f is out of the velocity range' % (minVel))
print('minVel is setting to %.2f' % (velrange[0]))
minVel = velrange[0]
if (maxVel > velrange[-1]) or (maxVel < minVel):
print('maxVel: %.2f is out of the velocity range' % (maxVel))
print('maxVel is setting to %.2f' % (velrange[-1]))
maxVel = velrange[-1]
        # select indices for the height range
minIndex = 0
maxIndex = 0
heights = self.dataOut.heightList
inda = numpy.where(heights >= minHei)
indb = numpy.where(heights <= maxHei)
try:
minIndex = inda[0][0]
except:
minIndex = 0
try:
maxIndex = indb[0][-1]
except:
maxIndex = len(heights)
if (minIndex < 0) or (minIndex > maxIndex):
raise ValueError("some value in (%d,%d) is not valid" % (
minIndex, maxIndex))
if (maxIndex >= self.dataOut.nHeights):
maxIndex = self.dataOut.nHeights - 1
        # select indices for the velocities
indminvel = numpy.where(velrange >= minVel)
indmaxvel = numpy.where(velrange <= maxVel)
try:
minIndexVel = indminvel[0][0]
except:
minIndexVel = 0
try:
maxIndexVel = indmaxvel[0][-1]
except:
maxIndexVel = len(velrange)
        # select the spectrum block
data_spc = self.dataOut.data_spc[:,
minIndexVel:maxIndexVel + 1, minIndex:maxIndex + 1]
        # noise estimation
noise = numpy.zeros(self.dataOut.nChannels)
for channel in range(self.dataOut.nChannels):
daux = data_spc[channel, :, :]
sortdata = numpy.sort(daux, axis=None)
noise[channel] = hildebrand_sekhon(sortdata, self.dataOut.nIncohInt)
self.dataOut.noise_estimation = noise.copy()
return 1
class removeDC(Operation):
def run(self, dataOut, mode=2):
self.dataOut = dataOut
jspectra = self.dataOut.data_spc
jcspectra = self.dataOut.data_cspc
num_chan = jspectra.shape[0]
num_hei = jspectra.shape[2]
if jcspectra is not None:
jcspectraExist = True
num_pairs = jcspectra.shape[0]
else:
jcspectraExist = False
freq_dc = int(jspectra.shape[1] / 2)
ind_vel = numpy.array([-2, -1, 1, 2]) + freq_dc
ind_vel = ind_vel.astype(int)
        if ind_vel[0] < 0:
            # wrap negative indices around the number of profiles
            ind_vel[list(range(0, 1))] = ind_vel[list(range(0, 1))] + jspectra.shape[1]
if mode == 1:
jspectra[:, freq_dc, :] = (
                jspectra[:, ind_vel[1], :] + jspectra[:, ind_vel[2], :]) / 2  # CORRECTION
if jcspectraExist:
jcspectra[:, freq_dc, :] = (
jcspectra[:, ind_vel[1], :] + jcspectra[:, ind_vel[2], :]) / 2
if mode == 2:
vel = numpy.array([-2, -1, 1, 2])
xx = numpy.zeros([4, 4])
for fil in range(4):
xx[fil, :] = vel[fil]**numpy.asarray(list(range(4)))
xx_inv = numpy.linalg.inv(xx)
xx_aux = xx_inv[0, :]
for ich in range(num_chan):
yy = jspectra[ich, ind_vel, :]
jspectra[ich, freq_dc, :] = numpy.dot(xx_aux, yy)
junkid = jspectra[ich, freq_dc, :] <= 0
cjunkid = sum(junkid)
if cjunkid.any():
jspectra[ich, freq_dc, junkid.nonzero()] = (
jspectra[ich, ind_vel[1], junkid] + jspectra[ich, ind_vel[2], junkid]) / 2
if jcspectraExist:
for ip in range(num_pairs):
yy = jcspectra[ip, ind_vel, :]
jcspectra[ip, freq_dc, :] = numpy.dot(xx_aux, yy)
self.dataOut.data_spc = jspectra
self.dataOut.data_cspc = jcspectra
return self.dataOut
class removeInterference(Operation):
def removeInterference2(self):
cspc = self.dataOut.data_cspc
spc = self.dataOut.data_spc
Heights = numpy.arange(cspc.shape[2])
        realCspc = numpy.abs(cspc)
#! /usr/bin/env python3
"""
fekete - Estimation of Fekete points on a unit sphere
This module implements the core algorithm put forward in [1],
allowing users to estimate the locations of N equidistant points on a
unit sphere.
[1] <NAME>., <NAME>., <NAME>., & <NAME>. Estimation of
Fekete points (2007), J Comp. Phys. 225, pp 2354--2376
https://doi.org/10.1016/j.jcp.2007.03.017
"""
# Created: Sat Jun 19, 2021 06:21pm Last modified: Sat Jun 19, 2021 06:21pm
#
# Copyright (C) 2021 <NAME> <<EMAIL>> This
# program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import numpy as np
from scipy.spatial.distance import pdist
from tqdm import tqdm
from numba import jit
from scipy.spatial import SphericalVoronoi
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import climnet.utils.general_utils as gut
G = 6.67408 * 1E-11 # m^3 / kg / s^2
def bendito(N=100, a=1., X=None, maxiter=1000,
break_th=0.001,
verbose=True):
"""
Return the Fekete points according to the Bendito et al. (2007) algorithm.
Parameters
----------
N : int
Number of points to be distributed on the surface of the unit sphere.
Default is `N = 100`.
a : float
Positive scalar that weights the advance direction in accordance with
the kernel under consideration and the surface (cf. Eq. 4 and Table 1
of Bendito et al., 2007). Default is `a = 1` which corresponds to the
Newtonian kernel.
    X : numpy.ndarray, with shape (N, 3)
Initial configuration of points. The array consists of N observations
(rows) of 3-D (x, y, z) locations of the points. If provided, `N` is
        overridden and set to `X.shape[0]`. Default is `None`.
maxiter : int
Maximum number of iterations to carry out. Since the error of the
configuration continues to decrease exponentially after a certain
number of iterations, a saturation / convergence criterion is not
        implemented. Users are advised to check that the regime of exponential
        decrease has been reached by trying out different high values of `maxiter`.
Default is 1000.
verbose : bool
Show progress bar. Default is `True`.
Returns
-------
X_new : numpy.ndarray, with shape (N, 3)
Final configuration of `N` points on the surface of the sphere after
`maxiter` iterations. Each row contains the (x, y, z) coordinates of
the points. If `X` is provided, the `X_new` has the same shape as `X`.
dq : numpy.ndarray, with shape (maxiter,)
Maximum disequilibrium degree after each iteration. This is defined as
the maximum of the modulus of the disequilibrium vectors at each point
location. Intuitively, this can be understood as a quantity that is
proportional to the total potential energy of the current configuration
of points on the sphere's surface.
"""
# parse inputs
if X is None or len(X) == 0:
print("Initial configuration not provided. Generating random one ...")
X = points_on_sphere(N) # initial random configuration
else:
N = X.shape[0]
# core loop
    # initialize parameters
dq = []
w = np.zeros(X.shape)
# set up progress bar
pb_fmt = "{desc:<5.5}{percentage:3.0f}%|{bar:30}{r_bar}"
pb_desc = "Estimating Fekete points ..."
# iterate
for k in tqdm(range(maxiter), bar_format=pb_fmt, desc=pb_desc,
disable=not verbose):
# Core steps from Bendito et al. (2007), pg 6 bottom
# 1.a. Advance direction
for i in range(len(X)):
w[i] = descent_direction_i(X, i)
# 1.b. Error as max_i |w_i|
mod_w = np.sqrt((w ** 2).sum(axis=1))
max_w = np.max(mod_w)
        dq.append(np.max(mod_w))
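
# --- Usage sketch (not part of the original module) ---------------------------
# Illustrative call of bendito() following its docstring; the numbers are
# assumptions rather than values prescribed by the original code.
#
#   X_new, dq = bendito(N=500, a=1.0, maxiter=2000)
#   # X_new: (500, 3) Cartesian coordinates on the unit sphere
#   # dq:    maximum disequilibrium degree per iteration (should decay)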
from logging import log
import numpy as np
import pandas as pd
from scipy import interpolate
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import ScalarFormatter
from flask import Flask, render_template, request
from tkinter import *
from tkinter import ttk
import sys
import os
import shutil
import random
from matplotlib.ticker import MaxNLocator
from pathlib import Path
import math
import copy
#from decimal import Decimal, ROUND_HALF_UP
def readinput(filename):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
symbol = csv_input['Symbol']
value = csv_input['Value']
unit = csv_input['Unit']
valueDict = {}
unitDict = {}
for i, j, k in zip(symbol, value, unit):
valueDict[i] = float(j)
unitDict[i] = str(k)
return valueDict, unitDict
def CeqLHVFunc(filename,fuelName):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
fuelType = csv_input['Fuel type']
CeqLHV = csv_input['CeqLHV']
fuelDict = {}
for i, j in zip(fuelType, CeqLHV):
fuelDict[i] = float(j)
return fuelDict[fuelName]
def Cco2Func(filename,fuelName):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
fuelType = csv_input['Fuel type']
Cco2 = csv_input['Cco2']
Cco2Dict = {}
for i, j in zip(fuelType, Cco2):
Cco2Dict[i] = float(j)
return Cco2Dict[fuelName]
def initialFleetFunc(filename):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",")
year = csv_input['Year']
TEU = csv_input['TEU']
iniFleetDict = {}
k = 0
for i, j in zip(year, TEU):
iniFleetDict.setdefault(k,{})
iniFleetDict[k]['year'] = int(i)
iniFleetDict[k]['TEU'] = float(j)
k += 1
return iniFleetDict
def decisionListFunc(filename):
csv_input = pd.read_csv(filepath_or_buffer=filename, encoding="utf_8", sep=",").fillna(0)
Year = csv_input['Year']
Order = csv_input['Order']
fuelType = csv_input['Fuel type']
WPS = csv_input['WPS']
SPS = csv_input['SPS']
CCS = csv_input['CCS']
CAP = csv_input['CAP']
Speed = csv_input['Speed']
Fee = csv_input['Fee']
valueDict = {}
for i, j, k, l, m, n, o, p, q in zip(Year, Order, fuelType, WPS, SPS, CCS, CAP, Speed, Fee):
valueDict.setdefault(int(i),{})
valueDict[int(i)]['Order'] = int(j)
valueDict[int(i)]['fuelType'] = k
valueDict[int(i)]['WPS'] = int(l)
valueDict[int(i)]['SPS'] = int(m)
valueDict[int(i)]['CCS'] = int(n)
valueDict[int(i)]['CAP'] = float(o)
valueDict[int(i)]['Speed'] = float(p)
valueDict[int(i)]['Fee'] = float(q)
return valueDict
def fleetPreparationFunc(fleetAll,initialFleetFile,numCompany,startYear,lastYear,elapsedYear,tOpSch,tbid,valueDict,NShipFleet,parameterFile2,parameterFile12,parameterFile3,parameterFile5):
fleetAll.setdefault(numCompany,{})
fleetAll[numCompany].setdefault('total',{})
fleetAll[numCompany]['total']['sale'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['g'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['gTilde'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costTilde'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['saleTilde'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['cta'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['overDi'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costShipBasicHFO'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costShip'] = np.zeros(lastYear-startYear+1)
fleetAll[numCompany]['total']['costFuel'] = np.zeros(lastYear-startYear+1)
    fleetAll[numCompany]['total']['dcostFuel'] = np.zeros(lastYear-startYear+1)
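
# --- Usage sketch (not part of the original module) ---------------------------
# Hypothetical file names; the CSV layouts follow the columns read above
# (Symbol/Value/Unit for readinput, Year/Order/... for decisionListFunc).
#
#   valueDict, unitDict = readinput('inputParameters.csv')
#   decisions = decisionListFunc('decisionList.csv')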
# -*- coding: utf-8 -*-
"""
# @file name : camvid_dataset.py
# @author : JLChen
# @date : 2019-08-21 10:08:00
# @brief      : Dataset definition for the CamVid dataset
"""
import os
import torch
from PIL import Image
from torch.utils.data import Dataset
import torchvision.transforms.functional as ff
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
class LabelProcessor:
"""对标签图像的编码"""
def __init__(self, file_path):
        self.colormap = self.read_color_map(file_path)       # read the csv to get each label's RGB value (csv --> rgb)
        self.cm2lbl = self.encode_label_pix(self.colormap)   # build the mapping table from RGB values to class indices (rgb --> cls index)
self.names = pd.read_csv(file_path, sep=',').name.tolist()
    # Static-method decorator: such methods can be understood as ordinary functions defined
    # inside the class and can still be called via self.<name>.
    # Inside a static method, instance attributes and the instance object are not available,
    # i.e. nothing related to `self` can be used.
    # One reason for using static methods is program design (cleaner code, encapsulated functionality, etc.).
@staticmethod
    def read_color_map(file_path):  # data process and load.ipynb: parse the colormap data from the label file
"""
        Reads the csv file and returns the RGB pixel values of each class label as a list.
:param file_path:
:return: list, list[0] == [128, 128, 128] ...
"""
pd_label_color = pd.read_csv(file_path, sep=',')
colormap = []
for i in range(len(pd_label_color.index)):
tmp = pd_label_color.iloc[i]
color = [tmp['r'], tmp['g'], tmp['b']]
colormap.append(color)
return colormap
@staticmethod
def encode_label_pix(colormap):
"""
        Builds the label encoding and returns it as a lookup table.
        The key is the encoded pixel value, computed as (cm[0] * 256 + cm[1]) * 256 + cm[2];
        the value is the class index: 0, 1, 2, 3, ..., 11.
:param colormap:
:return: ndarray, shape = (16777216,)
"""
cm2lbl = np.zeros(256 ** 3)
for i, cm in enumerate(colormap):
cm2lbl[(cm[0] * 256 + cm[1]) * 256 + cm[2]] = i
return cm2lbl
def encode_label_img(self, img):
"""
        Converts RGB pixel values into label indices in the range 0-11.
:param img:
:return:
"""
        data = np.array(img, dtype='int32')
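
# --- Usage sketch (not part of the original module) ---------------------------
# Hypothetical paths; the csv is assumed to contain the name/r/g/b columns read
# by read_color_map above.
#
#   p = LabelProcessor('CamVid/class_dict.csv')
#   label_img = Image.open('CamVid/labels/0001TP_006690_L.png').convert('RGB')
#   label = p.encode_label_img(label_img)   # (H, W) array of class indices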
# -*- coding: utf-8 -*-
"""Main module."""
simulation = True
import sys
import copy
import math
import time
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN
# from sample import Sample
# from cluster import Cluster
# from microCluster import MicroCluster
class Sample():
"""
Each record of the stream has to be declared as a `Sample` class.
:param value: the `values` of the current sample.
:param timestamp: the `timestamp` of current sample.
"""
def __init__(self, value, timestamp: int):
self.value = value
self.timestamp = 0
self.realTimestamp = timestamp
def getValue(self):
"""
:return: :attr:`value`
"""
return self.value
def setTimestamp(self, timestamp: int):
"""
:set: :attr:`timestamp`
"""
self.timestamp = timestamp
def setRealTimestamp(self, timestamp):
self.realTimestamp = timestamp
def setMicroClusterNumber(self, microClusterNumber: int):
"""
Assign to each sample the microClusterNumber in which was merged.
:set: :attr:`microClusterNumber`
"""
self.microClusterNumber = microClusterNumber
def computeReductionFactor(lamb, steps):
return math.pow(2, -lamb * steps)
class MicroCluster():
"""
Micro-Cluster class
:param currenttimestamp: the `timestamp` in which the cluster is created.
:param lamb: the `lamb` parameter used as decay factor.
:param clusterNumber: the `number` of the micro-cluster.
"""
def __init__(self, currenttimestamp, lamb, clusterNumber):
self.dimensions = None
self.creationTimeStamp = currenttimestamp
self.lamb = lamb
self.reductionFactor = computeReductionFactor(self.lamb, 1)
self.clusterNumber = clusterNumber
def insertSample(self, sample, timestamp=0):
"""
Adds a sample to a micro-cluster. Updates the variables of the micro-cluster with :meth:`updateRealTimeWeight` and :meth:`updateRealTimeLSandSS`
:param sample: the `sample` object
:param timestamp: deprecated, not needed anymore. Will be removed in the next versions.
"""
if self.dimensions == None:
            if isinstance(sample.value, list):
self.dimensions = len(sample.value)
elif isinstance(sample.value, float):
self.dimensions = 1
elif isinstance(sample.value, np.ndarray):
self.dimensions = len(sample.value)
else:
print ('Received {}'.format(sample.value))
print ('Type {}'.format(type(sample.value)))
sys.exit('Error instance sample.value type')
### incremental parameteres ###
self.N = 0
self.weight = 0
self.LS = np.zeros(self.dimensions)
self.SS = np.zeros(self.dimensions)
self.center = np.zeros(self.dimensions)
self.radius = 0
self.N += 1
self.updateRealTimeWeight()
self.updateRealTimeLSandSS(sample)
def updateRealTimeWeight(self):
"""
Updates the Weight of the micro-cluster by the fading factor and increases it by 1.
"""
self.weight *= self.reductionFactor
self.weight += 1
def updateRealTimeLSandSS(self, sample):
"""
Updates the `Weighted Linear Sum` (WLS), the `Weighted Squared Sum` (WSS), the `center` and the `radius` of the micro-cluster when a new sample is merged.
:param sample: the `sample` to merge into the micro-cluster.
"""
sample = np.array(sample.value)
self.LS = np.multiply(self.LS, self.reductionFactor)
self.SS = np.multiply(self.SS, self.reductionFactor)
self.LS = self.LS + sample
self.SS = self.SS + np.power(sample, 2)
self.center = np.divide(self.LS, float(self.weight))
LSd = np.power(self.center, 2)
SSd = np.divide(self.SS, float(self.weight))
maxRad = np.nanmax(np.sqrt(SSd.astype(float)-LSd.astype(float)))
# maxRad = np.nanmax(np.lib.scimath.sqrt(SSd-LSd))
self.radius = maxRad
def noNewSamples(self):
"""
Updates the `Weighted Linear Sum` (WLS), the `Weighted Squared Sum` (WSS) and the weight of the micro-cluster when no new samples are merged.
"""
        self.LS = np.multiply(self.LS, self.reductionFactor)
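
# --- Usage sketch (not part of the original module) ---------------------------
# Minimal illustration of the classes above; the numeric values are arbitrary.
#
#   s = Sample(np.array([1.0, 2.0]), timestamp=0)
#   mc = MicroCluster(currenttimestamp=0, lamb=0.1, clusterNumber=0)
#   mc.insertSample(s)      # updates N, weight, LS, SS, center and radius
#   mc.noNewSamples()       # decays the statistics when nothing is merged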
'''
PlotTrace.py
Executable for plotting trace stats of learning algorithm progress, including
* objective function (ELBO) vs laps thru data
* number of active components vs laps thru data
* hamming distance vs laps thru data
Usage (command-line)
-------
python -m bnpy.viz.PlotTrace dataName jobpattern [kwargs]
'''
from builtins import *
import numpy as np
import argparse
import glob
import os
import scipy.io
from .PlotUtil import pylab
from bnpy.ioutil import BNPYArgParser
from bnpy.ioutil.CountReader import loadKeffForTask
from .JobFilter import filterJobs
taskidsHelpMsg = "ids of trials/runs to plot from given job." + \
" Example: '4' or '1,2,3' or '2-6'."
Colors = [(0, 0, 0), # black
(0, 0, 1), # blue
(1, 0, 0), # red
(0, 1, 0.25), # green (darker)
(1, 0, 1), # magenta
(0, 1, 1), # cyan
(1, 0.6, 0), # orange
]
LabelMap = dict(laps='num pass thru data',
iters='num alg steps',
times='elapsed time (sec)',
K='num topics K',
evidence='train objective',
)
LabelMap['laps-saved-params'] = 'num pass thru data'
LabelMap['hamming-distance'] = 'Hamming dist.'
LabelMap['Keff'] = 'num topics K'
def plotJobsThatMatchKeywords(jpathPattern='/tmp/', **kwargs):
''' Create line plots for jobs matching pattern and provided kwargs
'''
if not jpathPattern.startswith(os.path.sep):
jpathPattern = os.path.join(os.environ['BNPYOUTDIR'], jpathPattern)
jpaths, legNames = filterJobs(jpathPattern, **kwargs)
plotJobs(jpaths, legNames, **kwargs)
def plotJobs(jpaths, legNames, styles=None, density=2,
xvar='laps', yvar='evidence', loc='upper right',
xmin=None, xmax=None,
taskids=None, savefilename=None, tickfontsize=None,
bbox_to_anchor=None, **kwargs):
''' Create line plots for provided jobs.
'''
nLines = len(jpaths)
if nLines == 0:
raise ValueError('Empty job list. Nothing to plot.')
nLeg = len(legNames)
for lineID in range(nLines):
if styles is None:
curStyle = dict(colorID=lineID)
else:
curStyle = styles[lineID]
task_kwargs = dict(**kwargs)
task_kwargs.update(curStyle)
plot_all_tasks_for_job(jpaths[lineID], legNames[lineID],
xvar=xvar, yvar=yvar,
taskids=taskids, density=density, **task_kwargs)
# Y-axis limit determination
# If we have "enough" data about the run beyond two full passes of dataset,
# we zoom in on the region of data beyond lap 2
if xvar == 'laps' and yvar == 'evidence':
xmax = 0
ymin = np.inf
ymin2 = np.inf
ymax = -np.inf
allRunsHaveXBeyond1 = True
for line in pylab.gca().get_lines():
xd = line.get_xdata()
yd = line.get_ydata()
if xd.size < 3:
allRunsHaveXBeyond1 = False
continue
posLap1 = np.searchsorted(xd, 1.0)
posLap2 = np.searchsorted(xd, 2.0)
if posLap1 < xd.size:
ymin = np.minimum(ymin, yd[posLap1])
ymax = np.maximum(ymax, yd[posLap1:].max())
if posLap2 < xd.size:
ymin2 = np.minimum(ymin2, yd[posLap2])
xmax = np.maximum(xmax, xd.max())
if xd.max() <= 1:
allRunsHaveXBeyond1 = False
if allRunsHaveXBeyond1 and xmax > 1.5:
# If all relevant curves extend beyond x=1, only show that part
xmin = 1.0 - 1e-5
else:
xmin = 0
if allRunsHaveXBeyond1 and ymin2 < ymax:
range1 = ymax - ymin
range2 = ymax - ymin2
if 10 * range2 < range1:
# Y values jump from lap1 to lap2 is enormous,
# so let's just show y values from lap2 onward...
ymin = ymin2
if (not np.allclose(ymax, ymin)) and allRunsHaveXBeyond1:
pylab.ylim([ymin, ymax + 0.1 * (ymax - ymin)])
pylab.xlim([xmin, xmax + .05 * (xmax - xmin)])
if loc is not None and len(jpaths) > 1:
pylab.legend(loc=loc, bbox_to_anchor=bbox_to_anchor)
if tickfontsize is not None:
pylab.tick_params(axis='both', which='major', labelsize=tickfontsize)
if savefilename is not None:
try:
pylab.show(block=False)
except TypeError:
pass # when using IPython notebook
pylab.savefig(savefilename, bbox_inches='tight', pad_inches=0)
else:
try:
pylab.show(block=True)
except TypeError:
pass # when using IPython notebook
def plot_all_tasks_for_job(jobpath, label, taskids=None,
color=None,
colorID=0,
density=2,
yvar='evidence',
markersize=10,
linewidth=2,
linestyle='-',
drawLineToXMax=None,
showOnlyAfterLap=0,
xvar='laps',
**kwargs):
''' Create line plot in current figure for each task/run of jobpath
'''
if not os.path.exists(jobpath):
if not jobpath.startswith(os.path.sep):
jobpath_tmp = os.path.join(os.environ['BNPYOUTDIR'], jobpath)
if not os.path.exists(jobpath_tmp):
raise ValueError("PATH NOT FOUND: %s" % (jobpath))
jobpath = jobpath_tmp
if color is None:
color = Colors[colorID % len(Colors)]
taskids = BNPYArgParser.parse_task_ids(jobpath, taskids)
if yvar == 'hamming-distance':
yspfile = os.path.join(jobpath, taskids[0], yvar + '-saved-params.txt')
if xvar == 'laps' and os.path.isfile(yspfile):
xvar = 'laps-saved-params'
for tt, taskid in enumerate(taskids):
xs = None
ys = None
laps = None
try:
var_ext = ''
ytxtfile = os.path.join(jobpath, taskid, yvar + '.txt')
if not os.path.isfile(ytxtfile):
var_ext = '-saved-params'
ytxtfile = os.path.join(
jobpath, taskid, yvar + var_ext + '.txt')
ys = np.loadtxt(ytxtfile)
if ytxtfile.count('saved-params'):
laptxtfile = os.path.join(jobpath, taskid, 'laps-saved-params.txt')
else:
laptxtfile = os.path.join(jobpath, taskid, 'laps.txt')
except IOError as e:
# TODO: when is this code needed?
# xs, ys = loadXYFromTopicModelFiles(jobpath, taskid)
try:
if isinstance(xs, np.ndarray) and yvar.count('Keff'):
ys = loadKeffForTask(
os.path.join(jobpath, taskid), **kwargs)
assert xs.size == ys.size
else:
# Heldout metrics
xs, ys = loadXYFromTopicModelSummaryFiles(
jobpath, taskid, xvar=xvar, yvar=yvar)
if showOnlyAfterLap and showOnlyAfterLap > 0:
laps, _ = loadXYFromTopicModelSummaryFiles(
jobpath, taskid, xvar='laps', yvar=yvar)
except ValueError:
try:
xs, ys = loadXYFromTopicModelSummaryFiles(jobpath, taskid)
except ValueError:
raise e
if yvar == 'hamming-distance' or yvar == 'Keff':
if xvar == 'laps-saved-params':
# fix off-by-one error, if we save an extra dist on final lap
if xs.size == ys.size - 1:
ys = ys[:-1]
elif ys.size == xs.size - 1:
xs = xs[:-1] # fix off-by-one error, if we quit early
elif xs.size != ys.size:
# Try to subsample both time series at laps where they
# intersect
laps_x = np.loadtxt(os.path.join(jobpath, taskid, 'laps.txt'))
laps_y = np.loadtxt(os.path.join(jobpath, taskid,
'laps-saved-params.txt'))
assert xs.size == laps_x.size
if ys.size == laps_y.size - 1:
laps_y = laps_y[:-1]
xs = xs[np.in1d(laps_x, laps_y)]
                    ys = ys[np.in1d(laps_y, laps_x)]
"""
Team name: ThE raNDom WALkERS
Members: <NAME>, <NAME>, <NAME>
"""
import pandas as pd
import numpy as np
from typing import Union
def compute_beta_vec(bo, epsilon_vec):
"""Compute beta from distribution of f."""
sorted_f_vec = np.sort(bo.f_vec)
f0 = sorted_f_vec[0]
N0 = ((sorted_f_vec - f0) == 0).sum()
f1_index = np.where((sorted_f_vec - f0) > 0)[0][0]
f1 = sorted_f_vec[f1_index]
N1 = ((sorted_f_vec - f1) == 0).sum()
beta_vec = np.log(N1 / (N0 * epsilon_vec)) / (f1 - f0)
return beta_vec
def get_optimal_betas_df(lambda_vec, epsilon_vec, lambda_arrays_list, model_string='G1'):
"""Return a dataframe containing the optimal values for beta."""
running_list = []
model_repeated = np.repeat(model_string, len(epsilon_vec))
for i, lambda_array in enumerate(lambda_arrays_list):
lambda_repeated = np.repeat(lambda_vec[i], len(epsilon_vec))
l = list(zip(model_repeated, lambda_repeated, epsilon_vec, np.median(lambda_array, axis=0)))
for item in l:
running_list.append(item)
optimal_betas_df = pd.DataFrame.from_dict(running_list).rename(columns={0: 'model', 1: 'lambda', 2: 'epsilon', 3: 'beta'})
return optimal_betas_df
def getOptBetaSeq(generator_name: str, lambda_: Union[int, float, str]):
beta_lookup_table = pd.read_csv('generated_data/G1_and_G2_optimal_betas.csv')
if generator_name in ('G1', 'G2'):
aval_lambdas = pd.unique(beta_lookup_table[beta_lookup_table.model == generator_name]['lambda'])
correct_lambda = aval_lambdas[np.argmin(np.abs(aval_lambdas - lambda_))]
print(f'Original lambda is {round(lambda_, 2)} Corrected one is {round(correct_lambda, 2)}')
betas = beta_lookup_table.query("(model == @generator_name) & (`lambda` == @correct_lambda)")
return sorted(betas.beta)
else:
aval_lambdas = pd.unique(beta_lookup_table[beta_lookup_table.model == 'G1']['lambda'])
correct_lambda = aval_lambdas[np.argmin(np.abs(aval_lambdas - lambda_))]
betas1 = beta_lookup_table.query("(model == 'G1') & (`lambda` == @correct_lambda)")
aval_lambdas = pd.unique(beta_lookup_table[beta_lookup_table.model == 'G2']['lambda'])
correct_lambda = aval_lambdas[np.argmin(np.abs(aval_lambdas - lambda_))]
betas2 = beta_lookup_table.query("(model == 'G2') & (`lambda` == @correct_lambda)")
allbeta = np.array([sorted(betas1.beta), sorted(betas2.beta)])
return np.mean(allbeta, axis=0)
def interpBetas(betas, steps, smooth=True):
if steps <= len(betas):
return betas
if smooth:
# least square
x = np.linspace(0, steps - 1, num=len(betas), endpoint=True)
A = np.vstack([x, np.ones(len(x))]).T
y = np.log(betas)
coef, _, _, _ = np.linalg.lstsq(A, y, rcond=None)
smoothed = np.exp(coef[0] * np.arange(steps) + coef[1])
return smoothed
else:
# interpolation
xp = np.linspace(0, steps - 1, num=len(betas))
xp = np.int32(xp)
beta_full = np.interp(np.arange(steps), xp, betas)
return beta_full
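
# --- Usage sketch (not part of the original module) ---------------------------
# Illustrative only: expand three beta values into a ten-step annealing schedule.
#
#   betas = [0.5, 2.0, 8.0]
#   schedule = interpBetas(betas, steps=10, smooth=True)
#   # smooth=True fits a least-squares line to log(betas) and returns its
#   # exponential, i.e. a geometric-like progression over the ten steps.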
def getMaxVal(val, num_cities):
    max_obj_val = np.max(val)
#!/usr/bin/env python
# PROGRAM: plot_sst.py
# ----------------------------------------------------------------------------------
# Version 0.18
# 19 August, 2019
# michael.taylor AT reading DOT ac DOT uk
# PYTHON DEBUGGER CONTROL:
#------------------------
# import os; os._exit(0)
# import ipdb
# ipdb.set_trace()
import os.path
import optparse
from optparse import OptionParser
import sys
import numpy as np
import xarray
import pandas as pd
from pandas import Series, DataFrame, Panel
import seaborn as sns; sns.set(style="darkgrid")
import datetime
import matplotlib
import matplotlib.pyplot as plt; plt.close("all")
#import typhon
#from typhon.plots import plot_bitfield
#cmap = 'tab20c' # https://matplotlib.org/users/colormaps
def calc_median(counts,bins):
"""
# -------------------------------
# CALCULATE MEDIUM FROM HISTOGRAM
# -------------------------------
# M_estimated ~ L_m + [ ( N/2 - F_{m-1} ) / f_m] * c
#
# where,
#
# L_m =lower limit of the median bar
# N = is the total number of observations
# F_{m-1} = cumulative frequency (total number of observations) in all bars below the median bar
# f_m = frequency of the median bar
# c = median bar width
"""
M = 0
counts_cumsum = counts.cumsum()
counts_half = counts_cumsum[-1]/2.0
for i in np.arange(0,bins.shape[0]-1):
counts_l = counts_cumsum[i]
counts_r = counts_cumsum[i+1]
if (counts_half >= counts_l) & (counts_half < counts_r):
c = bins[1]-bins[0]
L_m = bins[i+1]
F_m_minus_1 = counts_cumsum[i]
f_m = counts[i+1]
M = L_m + ( (counts_half - F_m_minus_1) / f_m ) * c
return M
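
# --- Worked example (not part of the original script) -------------------------
# Tiny illustration of the grouped-median formula above; the numbers are
# arbitrary. `counts` and `bins` are given equal length, mirroring how
# calc_median is called later in this script.
#
#   counts = np.array([1., 2., 5., 2., 1.])
#   bins = np.arange(0.0, 0.5, 0.1)
#   calc_median(counts, bins)   # -> 0.25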
def plot_n_sst(times,n_sst_q3,n_sst_q4,n_sst_q5):
"""
# ---------------------------------------
# PLOT CUMULATIVE SST OBSERVATION DENSITY
# ---------------------------------------
"""
ocean_area = 361900000.0
t = np.array(times, dtype=np.datetime64)
years = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D') / 365.0
Q3 = pd.Series(n_sst_q3, index=times).fillna(0) / ocean_area / years
Q4 = pd.Series(n_sst_q4, index=times).fillna(0) / ocean_area / years
Q5 = pd.Series(n_sst_q5, index=times).fillna(0) / ocean_area / years
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = df['QL=4'] + df['QL=5']
df = df.mask(np.isinf(df))
fig = plt.figure()
plt.plot(times,df['QL=4 & 5'].cumsum(), drawstyle='steps')
plt.plot(times,df['QL=3'].cumsum(), drawstyle='steps')
plt.tick_params(labelsize=12)
plt.ylabel("Observation density / $\mathrm{km^{-2} \ yr^{-1}}$", fontsize=12)
title_str = ' ' + 'QL=3:max=' + "{0:.5f}".format(df['QL=3'].cumsum().max()) + ' ' + 'QL=4 & 5:max=' + "{0:.5f}".format(df['QL=4 & 5'].cumsum().max())
print(title_str)
plt.legend(loc='best')
plt.savefig('n_sst.pdf')
# plt.savefig('n_sst.png', dpi=600)
# plt.savefig('n_sst.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_n_sst_lat(lat_vec,n_sst_q3_lat,n_sst_q4_lat,n_sst_q5_lat):
"""
# ------------------------------------------
# PLOT SST OBSERVATION DENSITY WITH LATITUDE
# ------------------------------------------
"""
interpolation = np.arange(-90,90,1)
multiplier = 1.0
Q3 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q3_lat), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q4_lat), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q5_lat), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = df['QL=4'] + df['QL=5']
df['QL=3 & 4 & 5'] = df['QL=3'] + df['QL=4'] + df['QL=5']
df = df.mask(np.isinf(df))
fig = plt.figure()
plt.fill_between(interpolation, df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(interpolation, df['QL=3'], step="post", alpha=0.4)
plt.plot(interpolation, df['QL=4 & 5'], drawstyle='steps-post', label='QL=4 & 5')
plt.plot(interpolation, df['QL=3'], drawstyle='steps-post', label='QL=3')
ax = plt.gca()
ax.set_xlim([-90,90])
ticks = ax.get_xticks()
ax.set_xticks(np.linspace(-90, 90, 7))
plt.tick_params(labelsize=12)
plt.xlabel("Latitude / $\mathrm{\degree N}$", fontsize=12)
plt.ylabel("Observation density / $\mathrm{km^{-2} \ yr^{-1}}$", fontsize=12)
plt.legend(loc='best')
plt.savefig('n_sst_lat.pdf')
# plt.savefig('n_sst_lat.png', dpi=600)
# plt.savefig('n_sst_lat.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_sst(sst_midpoints,sst_q3_hist,sst_q4_hist,sst_q5_hist):
"""
# ------------------------------
# PLOT HISTOGRAM OF SST + MEDIAN
# ------------------------------
"""
# interpolation = np.arange(260.05,319.95,0.1) # original bin midpoints
i = np.arange(260,320,0.1) # bin edges
n = len(i)
m = 1.0
q3 = m * pd.Series(np.interp(i,sst_midpoints,sst_q3_hist), index=i)
q4 = m * pd.Series(np.interp(i,sst_midpoints,sst_q4_hist), index=i)
q5 = m * pd.Series(np.interp(i,sst_midpoints,sst_q5_hist), index=i)
dq = pd.DataFrame({'QL=3':q3, 'QL=4':q4, 'QL=5':q5})
dq['QL=4 & 5'] = 0.5 * (dq['QL=4'] + dq['QL=5'])
# dq = dq.mask(np.isinf(df))
M3 = calc_median(dq['QL=3'].values,i[0:n])
M4_5 = calc_median(dq['QL=4 & 5'].values,i[0:n])
interpolation = np.arange(260,320,1) # 10x original resolution
n = len(interpolation)
multiplier = 10.0
    Q3 = multiplier * pd.Series(np.interp(interpolation,sst_midpoints,sst_q3_hist), index=interpolation)
from pathlib import Path
from numpy import arange, array, ceil, empty, floor, isnan, linspace, \
log10, meshgrid, nan, tile, transpose, where
from numpy.ma import masked_where
from matplotlib.pyplot import clf, close, cm, colorbar, figure, savefig, show
from mpl_toolkits.basemap import Basemap
from os.path import dirname, isdir, join, realpath
from os import mkdir
import pyapex, seaborn
from scipy.interpolate import interp2d#, RectBivariateSpline
#
from pyigrf.pyigrf import GetIGRF
from pyiri2016 import IRI2016
from pyiri2016 import IRI2016Profile
from pyiri2016.iriweb import irisubgl, firisubl
from timeutil import TimeUtilities
#
cwd = Path(__file__).parent
DataFolder = cwd / 'data'
class IRI2016_2DProf(IRI2016Profile):
#def __init__(self):
# pass
#def _GetTitle(self):
# IRI2016Profile()._GetTitle(__self__)
def HeightVsTime(self, FIRI=False, hrlim=[0., 24.], hrstp=1.):
self.option = 1
nhrstp = int((hrlim[1] + hrstp - hrlim[0]) / hrstp) + 1
hrbins = list(map(lambda x: hrlim[0] + float(x) * hrstp, range(nhrstp)))
Ne = empty((nhrstp, self.numstp))
if FIRI: NeFIRI = empty((nhrstp, self.numstp))
Te = empty((nhrstp, self.numstp))
Ti = empty((nhrstp, self.numstp))
for i in range(nhrstp):
self.hour = hrbins[i]
self.HeiProfile()
Ne[i, :] = self.a[0, range(self.numstp)]
if FIRI: NeFIRI[i, :] = self.a[12, range(self.numstp)]
Te[i, :] = self.a[3, range(self.numstp)]
Ti[i, :] = self.a[2, range(self.numstp)]
# self._GetTitle()
altbins = arange(self.vbeg, self.vend + self.vstp, self.vstp)
self.data2D = {'alt' : altbins, 'hour' : hrbins, \
'Ne' : Ne, 'Te' : Te, 'Ti' : Ti, \
'title1' : self.title1, 'title2' : self.title2}
if FIRI:
self.FIRI2D = {'alt' : altbins, 'hour' : hrbins, \
'Ne' : NeFIRI, \
'title1' : self.title1, 'title2' : self.title2}
#
# End of 'HeightVsTime'
#####
def LatVsLon(self, lonlim=[-180., 180.], lonstp=20.):
self.option = 2
nlonstp = int((lonlim[1] + lonstp - lonlim[0]) / lonstp) + 1
lonbins = list(map(lambda x: lonlim[0] + float(x) * lonstp, range(nlonstp)))
NmF2 = empty((nlonstp, self.numstp))
hmF2 = empty((nlonstp, self.numstp))
B0 = empty((nlonstp, self.numstp))
dip = empty((nlonstp, self.numstp))
for i in range(nlonstp):
self.lon = lonbins[i]
self.HeiProfile()
NmF2[i, :] = self.b[0, range(self.numstp)]
hmF2[i, :] = self.b[1, range(self.numstp)]
B0[i, :] = self.b[9, range(self.numstp)]
dip[i, :] = self.b[24, range(self.numstp)]
latbins = arange(self.vbeg, self.vend + self.vstp, self.vstp)
self.data2D = {'lat' : latbins, 'lon' : lonbins, \
'NmF2' : NmF2, 'hmF2' : hmF2, 'B0' : B0, 'dip' : dip, \
'title' : self.title3}
#
# End of 'LatVsLon'
#####
def LatVsFL(self, date=[2003, 11, 21], FIRI=False, IGRF=False, time=[23, 15, 0], \
gc=[-77.76, -11.95], \
hlim=[80., 200.], hstp=1., mlatlim=[-10., 10.], mlatstp=.1):
#
# INPUTS
#
# Date
year, month, day = date
# Time
hour, minute, second = time
# Geog. Coord.
dlon, dlat = gc
# hlim -> Height range at equator, in km
# hstp -> height resolution at equator, in km
# mlatlim -> Geom. latitude range, in degrees
# mlatstp -> Geom. latitude resolution, in degrees
#
###
doy = TimeUtilities().CalcDOY(year, month, day)
        date2 = year + doy / (365 + (1 if TimeUtilities().IsLeapYear else 0))  # 366 days in leap years
# f = figure(figsize=(16,6))
# pn = f.add_subplot(111)
self.coordl, self.qdcoordl = [], []
for h in arange(hlim[0], hlim[1] + hstp, hstp):
gc, qc = pyapex.ApexFL().getFL(date=date2, dlon=dlon, dlat=dlat, \
hateq=h, mlatRange=mlatlim, mlatSTP=mlatstp)
# x, y, z = gc['lat'], gc['alt'], gc['lon']
# ind = where(y < hlim[0])
# if len(ind) > 0: x[ind], y[ind], z[ind] = nan, nan, nan
# pn.plot(x, y)
self.coordl.append([gc['lon'], gc['alt'], gc['lat']])
self.qdcoordl.append([qc['lon'], gc['alt'], qc['lat']])
# pn.invert_xaxis()
# show()
jf = IRI2016().Switches()
jmag = 0
mmdd = int(month * 100) + day
hour2 = hour + minute / 60 + second / 3600
self.coordl = array(self.coordl)
self.qdcoordl = array(self.qdcoordl)
# nfl -> No. of field-line (or height)
# nc -> No. of coord. (0 -> lon, 1 -> alt, 2 -> lat)
# np -> No. of points per field-line
nfl, nc, np = self.coordl.shape
self.ne, self.te = tile(nan, (np, nfl)), tile(nan, (np, nfl))
self.ti, self.tn = tile(nan, (np, nfl)), tile(nan, (np, nfl))
self.nHe, self.nO = tile(nan, (np, nfl)), tile(nan, (np, nfl))
self.nN2, self.nO2 = tile(nan, (np, nfl)), tile(nan, (np, nfl))
self.nAr, self.nH = tile(nan, (np, nfl)), tile(nan, (np, nfl))
self.nN, self.babs = tile(nan, (np, nfl)), tile(nan, (np, nfl))
if FIRI: self.neFIRI = tile(nan, (np, nfl))
for fl in range(nfl):
curr_coordl = transpose(self.coordl[fl, :, :])
ind = where(curr_coordl[:, 1] >= (hlim[0] - 10.))
if len(ind[0]) > 0:
outf, oarr = irisubgl(jf, jmag, year, mmdd, hour2, \
curr_coordl[ind[0], :], DataFolder)
self.ne[ind[0], fl] = outf[0, :]
self.tn[ind[0], fl] = outf[1, :]
self.ti[ind[0], fl] = outf[2, :]
self.te[ind[0], fl] = outf[3, :]
if FIRI: self.neFIRI[ind[0], fl], ierr = firisubl(year, doy, hour2, \
curr_coordl[ind[0], :], DataFolder)
self.nHe[ind[0], fl] = outf[20, :]
self.nO[ind[0], fl] = outf[21, :]
self.nN2[ind[0], fl] = outf[22, :]
self.nO2[ind[0], fl] = outf[23, :]
self.nAr[ind[0], fl] = outf[24, :]
self.nH[ind[0], fl] = outf[26, :]
self.nN[ind[0], fl] = outf[27, :]
self.babs[ind[0], fl] = list(self.getIGRF(curr_coordl[ind[0], :], date2)) \
if IGRF else outf[19, :]
self.hlim = hlim
self.date, self.time = date, time
self.f107cm = oarr[40, 0]
self.ap, self.Ap = oarr[50, 0], oarr[51, 0]
#
# End of 'LatVsFL'
#####
def _Get_Title(self):
dateStr = 'DATE: {:4d}/{:02d}/{:02d}'.format(self.date[0], self.date[1], self.date[2])
timeStr = 'TIME: {:02d}:{:02d} UT'.format(self.time[0], self.time[1])
f107Str = 'F107: {:6.2f}'.format(self.f107cm)
apStr = 'ap: {:3d}'.format(int(self.ap))
ApStr = 'Ap: {:3d}'.format(int(self.Ap))
gmlon = self.qdcoordl[0, 0, 0]
gmlonStr = '{:7.2f} {:s}'.format(abs(gmlon), 'E' if gmlon > 0. else 'W')
self._title1 = '{:s} - {:s} - MAG. LON.:{:s}'.format(dateStr, timeStr, gmlonStr)
self._title2 = '{:s} - {:s}'.format(f107Str, ApStr)
#
# End of '_GetTitle'
######
def getIGRF(self, coordl, year):
for lon, alt, lat in coordl:
bn, be, bd, xl, icode = GetIGRF(lat, lon, alt, year)
# Horizontal component
bh = (bn**2 + be**2)**.5
yield bh
def PlotLatVsFL(self):
self._Get_Title()
nrow, ncol = 2, 2
spID = nrow * 100 + ncol * 10
counter = 0
X, Y = transpose(self.coordl[:, 2, :]), transpose(self.coordl[:, 1, :])
f = figure(figsize=(16, 6))
for ir in range(nrow):
for ic in range(ncol):
pn = f.add_subplot(spID + (counter + 1))
if counter == 0:
Z = log10(self.ne)
vmin, vmax, nc = 8, 12, 32+1
zlabel = 'Log$_{10}$N$_e$(m$^{-3}$)'
elif counter == 1:
Z = log10(self.nHe)
vmin, vmax, nc = 5, 9, 32+1
zlabel = 'Log$_{10}$H$_e$(m$^{-3}$)'
elif counter == 2:
Z = self.te
vmin, vmax, nc = 100, 1200, 36+1
zlabel = 'T$_e$($^\circ$)'
elif counter == 3:
Z = self.tn
vmin, vmax, nc = 100, 1200, 36+1
zlabel = 'T$_n$($^\circ$)'
Z_masked = masked_where(isnan(Z), Z)
C = linspace(vmin, vmax, nc, endpoint=True)
ipc = pn.contourf(X, Y, Z_masked, C, cmap=cm.jet, extent='both', origin='lower')
if counter == 0: pn.set_title(self._title1)
if counter == 1: pn.set_title(self._title2)
if counter > 1: pn.set_xlabel('Geog. Lat. ($^\circ$)')
pn.set_ylabel('Altitude (km)')
pn.set_ylim(self.hlim)
pn.invert_xaxis()
pn.grid()
cp = colorbar(ipc)
cp.set_label(zlabel)
counter += 1
show()
#
# End of 'PlotLatVsFL'
#####
def PlotLatVsFLFIRI(self, save=False, verbose=False):
self._Get_Title()
nrow, ncol = 1, 1
spID = nrow * 100 + ncol * 10
counter = 0
X, Y = transpose(self.coordl[:, 2, :]), transpose(self.coordl[:, 1, :])
f = figure(figsize=(16, 6))
for ir in range(nrow):
for ic in range(ncol):
pn = f.add_subplot(spID + (counter + 1))
if counter == 0:
Z = log10(self.neFIRI)
vmin, vmax, nc = 9, 12, 24+1
zlabel = 'Log$_{10}$N$_e$(m$^{-3}$)'
#Z_masked = masked_where(isnan(Z), Z)
Z[where(Z < vmin)] = vmin
C = linspace(vmin, vmax, nc, endpoint=True)
ipc = pn.contourf(X, Y, Z, C, cmap=cm.jet, extent='both', origin='lower')
if counter == 0: pn.set_title(self._title1)
#if counter == 1: pn.set_title(self._title2)
pn.set_xlabel('Geog. Lat. ($^\circ$)')
pn.set_ylabel('Altitude (km)')
pn.set_ylim(self.hlim)
pn.invert_xaxis()
pn.grid()
cp = colorbar(ipc)
cp.set_label(zlabel)
counter += 1
if not save:
show()
else:
gpath = '../figures/' + '{:04d}{:02d}{:02d}/'.format(self.year, self.month, self.dom)
if not isdir(gpath): mkdir(gpath)
self.figname = gpath + 'firi-{:02d}{:02d}.jpg'.format(self.time[0], self.time[1])
if verbose: print('Saving at: {:s}'.format(self.figname))
savefig(self.figname, bbox_inches='tight', format='jpg', dpi=100)
clf()
close()
#
# End of 'PlotLatVsFL'
#####
def Plot2D(self):
f = figure(figsize=(24, 6))
if self.option == 1:
pn = f.add_subplot(131)
X, Y = meshgrid(self.data2D['hour'], self.data2D['alt'])
ipc = pn.pcolor(X, Y, transpose(log10(self.data2D['Ne'])), cmap=cm.jet, vmax=13, vmin=9)
pn.set_title(self.data2D['title1'])
pn.set_xlabel('Hour (UT)')
pn.set_ylabel('Altitude (km)')
cp1 = colorbar(ipc)
cp1.set_label('Log$_{10}$N$_e$(m$^{-3}$)')
pn = f.add_subplot(132)
ipc = pn.pcolor(X, Y, transpose(self.data2D['Te']), cmap=cm.jet, vmax=4000, vmin=100)
pn.set_title(self.data2D['title2'])
pn.set_xlabel('Hour (UT)')
pn.set_ylabel('Altitude (km)')
cp1 = colorbar(ipc)
cp1.set_label('T$_e$ ($^\circ$)')
pn = f.add_subplot(133)
ipc = pn.pcolor(X, Y, transpose(self.data2D['Ti']), cmap=cm.jet, vmax=4000, vmin=100)
pn.set_xlabel('Hour (UT)')
pn.set_ylabel('Altitude (km)')
cp1 = colorbar(ipc)
cp1.set_label('T$_i$ ($^\circ$)')
elif self.option == 2:
pn1 = f.add_subplot(111)
m = Basemap(llcrnrlon=self.data2D['lon'][0], llcrnrlat=self.data2D['lat'][0], \
urcrnrlon=self.data2D['lon'][-1], urcrnrlat=self.data2D['lat'][-1], \
resolution='l')
m.drawcoastlines()
parallelsLim = self._RoundLim([self.data2D['lat'][0], self.data2D['lat'][-1]])
m.drawparallels(arange(parallelsLim[0], parallelsLim[1], 20.), \
labels=[True, False, False, True])
meridiansLim = self._RoundLim([self.data2D['lon'][0], self.data2D['lon'][-1]])
m.drawmeridians(arange(meridiansLim[0], meridiansLim[1], 30.), \
labels=[True, False, False, True])
X, Y = meshgrid(self.data2D['lon'], self.data2D['lat'])
            ipc = m.pcolor(X, Y, transpose(9.*self.data2D['NmF2']**.5 * 1e-6))
"""
Implements pipelines to track a sequence: Obtain costs, solve the instance (global or instance wise)
"""
import torch
import numpy as np
from scipy.sparse import csc_matrix
from tqdm import tqdm
import math
import os
from src.TrackingModel import TrackingModel
from src.datasets import Data, SplittedDataloader
from src.utilities.conversions import to_numpy
''' Cost update functions '''
def temporal_decay(delta_time):
""" creates a temporal decay factor based on the temporal distance """
return 1 / (10 * delta_time.clamp(0, 2) + 0.1)
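
# --- Illustration (not part of the original module) ---------------------------
# temporal_decay operates on torch tensors and evaluates 1 / (10 * clamp(dt, 0, 2) + 0.1):
#   dt = 0.0 s  -> 10.0
#   dt = 1.0 s  -> ~0.099
#   dt >= 2.0 s -> ~0.0498   (saturates because of the clamp at 2 s)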
def induce_soft_constraints(data, result):
"""
    Induces soft constraints by adding a high (negative) cost value to high-confidence edges.
"""
if "high_confident" in data["edges"].keys():
high_confidention_cost = -1000
result['edge_costs'] = result['edge_costs'] + data["edges"]["high_confident"] * high_confidention_cost
return result
''' The tracker class to solve instances '''
class Tracker:
node_cost_keys = ['out_costs', 'in_costs', 'node_costs']
dataloader_cfg = dict(shuffle=False, num_workers=0, pin_memory=False, batch_size=1)
@staticmethod
def track(model: TrackingModel, dataset: Data):
if not model.solver.solve_instance_wise:
return Tracker.track_global(model, dataset)
else:
return Tracker.track_instance_wise(model, dataset)
@staticmethod
def track_global(model: TrackingModel, dataset: Data):
"""
This function infers and associates the data set with a given model
:param model: The model to evaluate
:param dataset: The dataset of class data. BE SURE THAT ONLY ONE SEQUENCE IS LOADED!
        :return: Dictionaries with numpy arrays
"""
model.eval()
seq = dataset.sequences_for_inference[0]
''' Create global graph for the sequence'''
full_graph_data = dataset.return_batch_with_full_graph(seq)
number_of_nodes = full_graph_data["nodes"]["frame"].shape[0]
node_row = full_graph_data["nodes"]["row"].cpu().numpy().astype(int)
node_row_to_index_mapping = np.ones(np.max(node_row) + 1)
node_row_to_index_mapping[node_row] = np.arange(node_row.shape[0])
node_row_to_index_mapping = node_row_to_index_mapping.astype(int)
''' Create edge cost and node cost container '''
node_costs = dict()
for key in Tracker.node_cost_keys:
node_costs[key] = torch.zeros_like(full_graph_data["nodes"]["id"][None, :])
edge_cost_matrix = csc_matrix((number_of_nodes, number_of_nodes), dtype=np.float32)
edge_calculations = csc_matrix((number_of_nodes, number_of_nodes), dtype=np.int16)
node_calculations = np.zeros(number_of_nodes, dtype=np.int16)
''' Iterate over dataset and fill cost container with cost values '''
dataset_cfg = dict(sequences=[seq], is_inference=True)
dataloader = SplittedDataloader(dataset, dataset_cfg, Tracker.dataloader_cfg, total_parts=25)
with torch.no_grad():
progress_bar = tqdm(iter(dataloader), desc="Track sequence with global graph")
for data in progress_bar:
if data["edges"]["sink"].numel() == 0:
continue
result = model.calculate_costs(data['nodes'], data['edges'])
result = induce_soft_constraints(data, result)
_rows, _sources, _sinks = \
data["nodes"]["row"].cpu().numpy().astype(int)[0], \
data["edges"]["source"].cpu().numpy().astype(int)[0], \
data["edges"]["sink"].cpu().numpy().astype(int)[0]
# Map detection_rows, sinks and sources to indices of the global graph
detection_indices = node_row_to_index_mapping[_rows]
_sources, _sinks = detection_indices[_sources], detection_indices[_sinks]
node_calculations[detection_indices] += 1
for key in Tracker.node_cost_keys:
node_costs[key][0, detection_indices] += result[key][0]
edge_calculations[_sources, _sinks] += 1
edge_cost_matrix[_sources, _sinks] += result["edge_costs"][0].numpy().astype(np.float32)
''' Convert aggregated edge costs to solver format '''
edge_counter = edge_calculations[
full_graph_data["edges"]["source"].numpy().astype(int),
full_graph_data["edges"]["sink"].numpy().astype(int)]
global_edge_costs = edge_cost_matrix[
full_graph_data["edges"]["source"].numpy().astype(int),
full_graph_data["edges"]["sink"].numpy().astype(int)]
global_edge_costs = global_edge_costs / np.maximum(1, edge_counter)
node_calculations = torch.from_numpy(node_calculations[None, :])
for key in Tracker.node_cost_keys:
node_costs[key] /= node_calculations.clamp(1, 10000)
costs = dict(
node_frames=full_graph_data["nodes"]["frame"][None, :], node_costs=node_costs['node_costs'],
edge_sources=full_graph_data["edges"]["source"][None, :], out_costs=node_costs['out_costs'],
edge_sinks=full_graph_data["edges"]["sink"][None, :], edge_costs=torch.from_numpy(global_edge_costs),
in_costs=node_costs['in_costs'],
)
''' Weight costs with the time '''
delta_time = \
(costs['node_frames'][0][costs['edge_sinks']] - costs['node_frames'][0][costs['edge_sources']]).float() / \
seq["fps"]
weight = temporal_decay(delta_time)
costs['edge_costs'] = costs['edge_costs'][0] * weight
''' Solve global instance and return full graph data '''
with torch.no_grad():
result = model.run_solver(costs=costs)
full_graph_data["prediction"] = result
full_graph_data["edges"]["costs"] = costs['edge_costs']
full_graph_data = to_numpy(full_graph_data)
return full_graph_data
@staticmethod
def track_instance_wise(model: TrackingModel, dataset: Data):
""" Tracks a sequence splitted into instances """
solver = model.solver.instance_solver
''' Create dataset specific values '''
seq = dataset.sequences_for_inference[0]
dataset_cfg = dict(sequences=[seq], is_inference=True)
full_graph_data = dataset.return_batch_with_full_graph(seq, return_edges=False)
number_of_nodes = full_graph_data["nodes"]["frame"].shape[0]
fps = seq["fps"]
datase_name = os.getenv("DATASET", "MOT17")
batchsize = 3 * 50 if datase_name == "MOT20" else 3 * 60
node_row = full_graph_data["nodes"]["row"].cpu().numpy().astype(int)
node_row_to_index_mapping = np.ones(np.max(node_row) + 1)
node_row_to_index_mapping[node_row] = np.arange(node_row.shape[0])
node_row_to_index_mapping = node_row_to_index_mapping.astype(int)
''' Update solver parameter for "irregular" videos with different framerate than 30 '''
if datase_name == "MOT17" and fps != 30:
new_len = str(int(math.floor(2 * fps)))
params = {"MAX_TIMEGAP_BASE": new_len, "MAX_TIMEGAP_LIFTED": new_len, "MAX_TIMEGAP_COMPLETE": new_len}
model.solver.batched_solver.update_params_map(params)
def init_tracker_container():
""" Create data containers required for a tracking run """
node_costs = dict()
for key in Tracker.node_cost_keys:
node_costs[key] = torch.zeros_like(full_graph_data["nodes"]["id"][None, :])
dataloader = SplittedDataloader(dataset, dataset_cfg, Tracker.dataloader_cfg, total_parts=50)
return dataloader, node_costs
def prepare_local_instance(
edge_calculations, edge_cost_matrix, node_calculations, node_costs,
first_frame, last_frame
):
""" Converts the sparse global graph to a local instance """
source, sink = edge_calculations.nonzero()
frames = full_graph_data["nodes"]["frame"].numpy()
if last_frame is not None:
valid = (frames[source] <= last_frame) * (frames[sink] <= last_frame)
source, sink = source[valid], sink[valid]
if first_frame is not None:
valid = (frames[source] >= first_frame) * (frames[sink] >= first_frame)
source, sink = source[valid], sink[valid]
edge_counter = edge_calculations[source, sink]
global_edge_costs = edge_cost_matrix[source, sink]
global_edge_costs = global_edge_costs / edge_counter
node_calculations = torch.from_numpy(node_calculations[None, :])
for key in node_costs:
node_costs[key] = node_costs[key] / node_calculations.float().clamp(1, 10000)
# Convert to cost tensor
costs = dict(
node_frames=full_graph_data["nodes"]["frame"][None, :], edge_sources=torch.from_numpy(source)[None, :],
edge_sinks=torch.from_numpy(sink)[None, :], edge_costs=torch.from_numpy(global_edge_costs),
in_costs=node_costs['in_costs'], out_costs=node_costs['out_costs'], node_costs=node_costs['node_costs']
)
return costs
def delete_old_nodes_and_edges(
edge_calculations, edge_cost_matrix, node_calculations, node_costs, min_frame
):
""" Removes entries from the sparse matrix for frames smaller than the current minimal frame"""
frames_to_be_removed = np.where(full_graph_data["nodes"]["frame"] < min_frame)[0]
edge_calculations[edge_calculations[frames_to_be_removed, :].nonzero()] = 0
edge_calculations[edge_calculations[:, frames_to_be_removed].nonzero()] = 0
edge_cost_matrix[edge_cost_matrix[frames_to_be_removed, :].nonzero()] = 0
edge_cost_matrix[edge_cost_matrix[:, frames_to_be_removed].nonzero()] = 0
edge_cost_matrix.eliminate_zeros()
edge_calculations.eliminate_zeros()
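            # Assigning zeros only overwrites the stored values; eliminate_zeros()
            # drops them from the sparse structure so the matrices stay compact.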
node_calculations[frames_to_be_removed] = 0
for key in node_costs.keys():
node_costs[key][0, frames_to_be_removed] = 0
return edge_calculations, edge_cost_matrix, node_calculations, node_costs
def iterate_through_dataset(node_costs):
""" Iterates over the sequence and solves batches"""
''' Create empty data container to accumulate costs '''
edge_cost_matrix, edge_calculations, node_calculations = \
csc_matrix((number_of_nodes, number_of_nodes), dtype=np.float32), \
csc_matrix((number_of_nodes, number_of_nodes), dtype=np.int16), \
np.zeros(number_of_nodes, dtype=np.int16)
data_stack = list()
''' Iterate over sequence and calculate all edges '''
progress_bar = tqdm(iter(dataloader), desc="Track sequence batchwise graph")
with torch.no_grad():
for datas in progress_bar:
datas = [datas] if type(datas) != list else datas
for data in datas:
if data["edges"]["sink"].numel() == 0:
continue
l_bound, u_bound = solver.time_bounds[0], solver.time_bounds[1]
''' Do inference for current batch'''
result = model.calculate_costs(data['nodes'], data['edges'])
result = induce_soft_constraints(data, result)
min_frame, max_frame = data["nodes"]["frame"].min().item(), data["nodes"]["frame"].max().item()
if max_frame < l_bound:
continue
''' Add calculated node and edge costs to accumulator '''
_rows, _sources, _sinks = \
data["nodes"]["row"].cpu().numpy().astype(int)[0], \
data["edges"]["source"].cpu().numpy().astype(int)[0], \
data["edges"]["sink"].cpu().numpy().astype(int)[0]
# Map detection_rows, sinks and sources to indices of the global graph
detection_indices = node_row_to_index_mapping[_rows]
_sources, _sinks = detection_indices[_sources], detection_indices[_sinks]
node_calculations[detection_indices] += 1
# Weight costs with time
delta_time = data["edges"]["delta_t"]
delta_time = delta_time.float()
weight = temporal_decay(delta_time)
result['edge_costs'][0] = result['edge_costs'][0] * weight
for key in Tracker.node_cost_keys:
node_costs[key][0, detection_indices] += result[key][0]
                        # Aggregate results here because updating the sparse matrix is slow
_ = result["edge_costs"][0].numpy().astype(np.float32)
data_stack.append([_sources, _sinks, _])
''' If all frames for the current batch are processed: Merge data and solve graph '''
solve = min_frame >= solver.time_bounds[1]
if solve:
''' Update sparse matrix with collected data '''
_sources = np.concatenate([_[0] for _ in data_stack])
                            _sinks = np.concatenate([_[1] for _ in data_stack])
# Optimize.py
# Created: Feb 2016, <NAME>
# Modified: Nov 2016, <NAME>
# Oct 2019, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units, Data
import numpy as np
import vehicle_mf
import procedure_mf
import matplotlib.pyplot as plt
from SUAVE.Optimization import Nexus, carpet_plot
from SUAVE.Optimization.Package_Setups.additive_setup import Additive_Solver
import SUAVE.Optimization.Package_Setups.TRMM.Trust_Region_Optimization as tro
from SUAVE.Optimization.Package_Setups.TRMM.Trust_Region import Trust_Region
import os
# ----------------------------------------------------------------------
# Run the whole thing
# ----------------------------------------------------------------------
def main():
np.random.seed(0)
problem = setup()
tol = 1e-8
def set_add_solver():
solver = Additive_Solver()
solver.local_optimizer = 'SLSQP'
solver.global_optimizer = 'SHGO'
return solver
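    # The solver is rebuilt via set_add_solver() before each case below so every
    # optimization starts from a fresh Additive_Solver instance.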
################### Basic Additive ##################################################
# ------------------------------------------------------------------
# Inactive constraints
# ------------------------------------------------------------------
solver = set_add_solver()
problem.optimization_problem.constraints = np.array([
[ 'x1' , '>', -10., 1., 1*Units.less],
[ 'x2' , '>', -50., 1., 1*Units.less],
],dtype=object)
print('Checking basic additive with no active constraints...')
outputs = solver.Additive_Solve(problem,max_iterations=10,num_samples=20,tolerance=1e-8,print_output=False)
print(outputs)
obj,x1,x2 = get_results(outputs)
# ------------------------------------------------------------------
# Check Results
# ------------------------------------------------------------------
assert( np.isclose(obj, 0, atol=1e-6) )
assert( np.isclose(x1 ,-.1, atol=1e-2) )
assert( np.isclose(x2 , 0, atol=1e-2) )
# ------------------------------------------------------------------
# Active constraint
# ------------------------------------------------------------------
solver = set_add_solver()
problem.optimization_problem.constraints = np.array([
[ 'x1' , '>', -10., 1., 1*Units.less],
[ 'x2' , '>', 1., 1., 1*Units.less],
],dtype=object)
print('Checking basic additive with one active constraint...')
outputs = solver.Additive_Solve(problem,max_iterations=1000,num_samples=20,tolerance=1e-8,print_output=False)
print(outputs)
obj,x1,x2 = get_results(outputs)
# ------------------------------------------------------------------
# Check Results
# ------------------------------------------------------------------
assert( np.isclose(obj, 1, atol=1e-6) )
assert( np.isclose(x1 ,-.1, atol=1e-2) )
assert( np.isclose(x2 , 1, atol=1e-2) )
# ------------------------------------------------------------------
# Other active constraints
# ------------------------------------------------------------------
solver = set_add_solver()
problem.optimization_problem.constraints = np.array([
[ 'x1' , '=', 2., 1., 1*Units.less],
[ 'x2' , '<', -1., 1., 1*Units.less],
],dtype=object)
print('Checking basic additive with two active constraints...')
outputs = solver.Additive_Solve(problem,max_iterations=1000,num_samples=20,tolerance=1e-8,print_output=False)
print(outputs)
obj,x1,x2 = get_results(outputs)
# ------------------------------------------------------------------
# Check Results
# ------------------------------------------------------------------
assert( np.isclose(obj,5.41, atol=1e-6) )
assert( np.isclose(x1 , 2, atol=1e-2) )
assert( np.isclose(x2 , -1, atol=1e-2) )
################# Additive MEI ##################################################
# ------------------------------------------------------------------
# Inactive constraints
# ------------------------------------------------------------------
solver = set_add_solver()
problem.optimization_problem.constraints = np.array([
[ 'x1' , '>', -10., 1., 1*Units.less],
[ 'x2' , '>', -50., 1., 1*Units.less],
],dtype=object)
print('Checking MEI additive with no active constraint...')
outputs = solver.Additive_Solve(problem,max_iterations=10,num_samples=20,tolerance=tol,print_output=False,opt_type='MEI')
print(outputs)
obj,x1,x2 = get_results(outputs)
# ------------------------------------------------------------------
# Check Results
# ------------------------------------------------------------------
assert( np.isclose(obj, 0, atol=1e-6) )
assert( np.isclose(x1 ,-.1, atol=1e-2) )
assert( np.isclose(x2 , 0, atol=1e-2) )
# ------------------------------------------------------------------
# Active constraint
# ------------------------------------------------------------------
solver = set_add_solver()
problem.optimization_problem.constraints = np.array([
[ 'x1' , '>', -10., 1., 1*Units.less],
[ 'x2' , '>', 1., 1., 1*Units.less],
],dtype=object)
print('Checking MEI additive with one active constraint...')
outputs = solver.Additive_Solve(problem,max_iterations=10,num_samples=20,tolerance=tol,print_output=False,opt_type='MEI')
print(outputs)
obj,x1,x2 = get_results(outputs)
# ------------------------------------------------------------------
# Check Results
# ------------------------------------------------------------------
    assert( np.isclose(obj, 1, atol=1e-4) ) # the optimizer does not reach the exact optimum here
assert( np.isclose(x1 ,-.1, atol=1e-2) )
assert( np.isclose(x2 , 1, atol=1e-2) )
#------------------------------------------------------------------
# Other active constraints
#------------------------------------------------------------------
solver = set_add_solver()
problem.optimization_problem.constraints = np.array([
[ 'x1' , '=', 2., 1., 1*Units.less],
[ 'x2' , '<', -1., 1., 1*Units.less],
],dtype=object)
print('Checking MEI additive with two active constraints...')
outputs = solver.Additive_Solve(problem,max_iterations=10,num_samples=20,tolerance=tol,print_output=False,opt_type='MEI')
print(outputs)
obj,x1,x2 = get_results(outputs)
# ------------------------------------------------------------------
# Check Results
# ------------------------------------------------------------------
assert( np.isclose(obj,5.41, atol=1e-6) )
assert( np.isclose(x1 , 2, atol=1e-6) )
assert( np.isclose(x2 , -1, atol=1e-6) )
################# TRMM ##################################################
tr_optimizer = 'SLSQP'
# ------------------------------------------------------------------
# Inactive constraints
# ------------------------------------------------------------------
problem.optimization_problem.constraints = np.array([
[ 'x1' , '>', -10., 1., 1*Units.less],
[ 'x2' , '>', -50., 1., 1*Units.less],
],dtype=object)
tr = Trust_Region()
problem.trust_region = tr
TRM_opt = tro.Trust_Region_Optimization()
TRM_opt.trust_region_max_iterations = 20
TRM_opt.optimizer = tr_optimizer
print('Checking TRMM with no active constraints...')
outputs = TRM_opt.optimize(problem,print_output=False)
print(outputs)
obj,x1,x2 = get_results(outputs)
# ------------------------------------------------------------------
# Check Results
# ------------------------------------------------------------------
    assert( np.isclose(obj, 0, atol=1e-6) )
# Copyright (c) 2012-2020 Jicamarca Radio Observatory
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
"""Definition of diferent Data objects for different types of data
Here you will find the different data objects for the different types
of data. These data objects must be used as dataIn or dataOut objects in
processing units and operations. Currently the supported data objects are:
Voltage, Spectra, SpectraHeis, Fits, Correlation and Parameters
"""
import copy
import numpy
import datetime
import json
import schainpy.admin
from schainpy.utils import log
from .jroheaderIO import SystemHeader, RadarControllerHeader
from schainpy.model.data import _noise
def getNumpyDtype(dataTypeCode):
if dataTypeCode == 0:
numpyDtype = numpy.dtype([('real', '<i1'), ('imag', '<i1')])
elif dataTypeCode == 1:
numpyDtype = numpy.dtype([('real', '<i2'), ('imag', '<i2')])
elif dataTypeCode == 2:
numpyDtype = numpy.dtype([('real', '<i4'), ('imag', '<i4')])
elif dataTypeCode == 3:
numpyDtype = numpy.dtype([('real', '<i8'), ('imag', '<i8')])
elif dataTypeCode == 4:
numpyDtype = numpy.dtype([('real', '<f4'), ('imag', '<f4')])
elif dataTypeCode == 5:
numpyDtype = numpy.dtype([('real', '<f8'), ('imag', '<f8')])
else:
raise ValueError('dataTypeCode was not defined')
return numpyDtype
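# Illustrative example: getNumpyDtype(2) returns the paired int32 record dtype
# numpy.dtype([('real', '<i4'), ('imag', '<i4')]) used for complex samples.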
def getDataTypeCode(numpyDtype):
if numpyDtype == numpy.dtype([('real', '<i1'), ('imag', '<i1')]):
datatype = 0
elif numpyDtype == numpy.dtype([('real', '<i2'), ('imag', '<i2')]):
datatype = 1
elif numpyDtype == numpy.dtype([('real', '<i4'), ('imag', '<i4')]):
datatype = 2
elif numpyDtype == numpy.dtype([('real', '<i8'), ('imag', '<i8')]):
datatype = 3
    elif numpyDtype == numpy.dtype([('real', '<f4'), ('imag', '<f4')]):
import numpy as np
from desc.backend import put
from desc.basis import FourierZernikeBasis
def get_initial_guess_scale_bdry(axis, bdry, bdry_ratio,
R_basis:FourierZernikeBasis, Z_basis:FourierZernikeBasis):
"""Generate initial guess by scaling boundary shape
Parameters
----------
axis : ndarray, shape(Naxis,3)
array of axis Fourier coeffs [n,Rcoeff, Zcoeff]
bdry : ndarray, shape(Nbdry,4)
array of boundary Fourier coeffs [m,n,Rcoeff, Zcoeff]
OR
array of real space coordinates, [theta,phi,R,Z]
bdry_ratio : float
fraction in range [0,1] of the full non-axisymmetric boundary to use
    R_basis : FourierZernikeBasis
        Fourier-Zernike basis for the R coordinate
    Z_basis : FourierZernikeBasis
        Fourier-Zernike basis for the Z coordinate
Returns
-------
cR : ndarray, shape(N_coeffs,)
Fourier-Zernike coefficients for R, following indexing given in zern_idx
cZ : ndarray, shape(N_coeffs,)
Fourier-Zernike coefficients for Z, following indexing given in zern_idx
"""
modes_R = R_basis.modes
modes_Z = Z_basis.modes
cR = np.zeros((R_basis.num_modes,))
    cZ = np.zeros((Z_basis.num_modes,))
"""
Test nimare.meta.kernel (CBMA kernel estimators).
"""
import shutil
import numpy as np
from scipy.ndimage.measurements import center_of_mass
from nimare.dataset import Dataset
from nimare.meta import kernel
from nimare import extract
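# Note: the `testdata_cbma` argument used throughout is assumed to be a pytest
# fixture (typically provided by the suite's conftest) yielding a small
# coordinate-based Dataset.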
def test_alekernel_smoke(testdata_cbma):
"""
Smoke test for nimare.meta.kernel.ALEKernel
"""
# Manually override dataset coordinates file sample sizes
# This column would be extracted from metadata and added to coordinates
# automatically by the Estimator
coordinates = testdata_cbma.coordinates.copy()
coordinates["sample_size"] = 20
kern = kernel.ALEKernel()
ma_maps = kern.transform(coordinates, testdata_cbma.masker, return_type="image")
assert len(ma_maps) == len(testdata_cbma.ids)
ma_maps = kern.transform(coordinates, testdata_cbma.masker, return_type="array")
assert ma_maps.shape[0] == len(testdata_cbma.ids)
# Test set_params
kern.set_params(fwhm=10, sample_size=None)
kern2 = kernel.ALEKernel(fwhm=10)
ma_maps1 = kern.transform(coordinates, testdata_cbma.masker, return_type="array")
ma_maps2 = kern2.transform(coordinates, testdata_cbma.masker, return_type="array")
assert ma_maps1.shape[0] == ma_maps2.shape[0] == len(testdata_cbma.ids)
assert np.array_equal(ma_maps1, ma_maps2)
def test_alekernel_1mm(testdata_cbma):
"""
Peaks of ALE kernel maps should match the foci fed in (assuming focus isn't
masked out).
Test on 1mm template.
"""
# Manually override dataset coordinates file sample sizes
# This column would be extracted from metadata and added to coordinates
# automatically by the Estimator
coordinates = testdata_cbma.coordinates.copy()
coordinates["sample_size"] = 20
id_ = "pain_01.nidm-1"
kern = kernel.ALEKernel()
ma_maps = kern.transform(coordinates, testdata_cbma.masker, return_type="image")
ijk = coordinates.loc[coordinates["id"] == id_, ["i", "j", "k"]]
ijk = ijk.values.astype(int)
kern_data = ma_maps[0].get_fdata()
max_idx = np.where(kern_data == np.max(kern_data))
max_ijk = np.array(max_idx).T
assert np.array_equal(ijk, max_ijk)
def test_alekernel_2mm(testdata_cbma):
"""
Peaks of ALE kernel maps should match the foci fed in (assuming focus isn't
masked out).
Test on 2mm template.
"""
# Manually override dataset coordinates file sample sizes
# This column would be extracted from metadata and added to coordinates
# automatically by the Estimator
coordinates = testdata_cbma.coordinates.copy()
coordinates["sample_size"] = 20
id_ = "pain_01.nidm-1"
kern = kernel.ALEKernel()
ma_maps = kern.transform(coordinates, masker=testdata_cbma.masker, return_type="image")
ijk = coordinates.loc[coordinates["id"] == id_, ["i", "j", "k"]]
ijk = np.squeeze(ijk.values.astype(int))
kern_data = ma_maps[0].get_fdata()
max_idx = np.array(np.where(kern_data == np.max(kern_data))).T
max_ijk = np.squeeze(max_idx)
assert np.array_equal(ijk, max_ijk)
def test_alekernel_inputdataset_returnimages(testdata_cbma):
"""
Peaks of ALE kernel maps should match the foci fed in (assuming focus isn't
masked out).
Test on Dataset object.
"""
# Manually override dataset coordinates file sample sizes
# This column would be extracted from metadata and added to coordinates
# automatically by the Estimator
testdata_cbma = testdata_cbma.copy()
coordinates = testdata_cbma.coordinates.copy()
coordinates["sample_size"] = 20
testdata_cbma.coordinates = coordinates
id_ = "pain_01.nidm-1"
kern = kernel.ALEKernel()
ma_maps = kern.transform(testdata_cbma, return_type="image")
ijk = coordinates.loc[coordinates["id"] == id_, ["i", "j", "k"]]
ijk = np.squeeze(ijk.values.astype(int))
kern_data = ma_maps[0].get_fdata()
max_idx = np.array(np.where(kern_data == np.max(kern_data))).T
max_ijk = np.squeeze(max_idx)
assert np.array_equal(ijk, max_ijk)
def test_alekernel_fwhm(testdata_cbma):
"""
Peaks of ALE kernel maps should match the foci fed in (assuming focus isn't
masked out).
Test with explicit FWHM.
"""
coordinates = testdata_cbma.coordinates.copy()
id_ = "pain_01.nidm-1"
kern = kernel.ALEKernel(fwhm=10)
ma_maps = kern.transform(coordinates, masker=testdata_cbma.masker, return_type="image")
ijk = coordinates.loc[coordinates["id"] == id_, ["i", "j", "k"]]
ijk = np.squeeze(ijk.values.astype(int))
kern_data = ma_maps[0].get_fdata()
max_idx = np.array(np.where(kern_data == np.max(kern_data))).T
max_ijk = np.squeeze(max_idx)
assert np.array_equal(ijk, max_ijk)
def test_alekernel_sample_size(testdata_cbma):
"""
Peaks of ALE kernel maps should match the foci fed in (assuming focus isn't
masked out).
Test with explicit sample size.
"""
coordinates = testdata_cbma.coordinates.copy()
id_ = "pain_01.nidm-1"
kern = kernel.ALEKernel(sample_size=20)
ma_maps = kern.transform(coordinates, masker=testdata_cbma.masker, return_type="image")
ijk = coordinates.loc[coordinates["id"] == id_, ["i", "j", "k"]]
ijk = np.squeeze(ijk.values.astype(int))
kern_data = ma_maps[0].get_fdata()
max_idx = np.array(np.where(kern_data == np.max(kern_data))).T
max_ijk = np.squeeze(max_idx)
assert np.array_equal(ijk, max_ijk)
def test_alekernel_inputdataset_returndataset(testdata_cbma, tmp_path_factory):
"""
Check that the different return types produce equivalent results
(minus the masking element).
"""
tmpdir = tmp_path_factory.mktemp("test_alekernel_inputdataset_returndataset")
testdata_cbma.update_path(tmpdir)
kern = kernel.ALEKernel(sample_size=20)
ma_maps = kern.transform(testdata_cbma, return_type="image")
ma_arr = kern.transform(testdata_cbma, return_type="array")
dset = kern.transform(testdata_cbma, return_type="dataset")
ma_maps_from_dset = kern.transform(dset, return_type="image")
ma_arr_from_dset = kern.transform(dset, return_type="array")
ma_maps_arr = testdata_cbma.masker.transform(ma_maps)
ma_maps_from_dset_arr = dset.masker.transform(ma_maps_from_dset)
dset_from_dset = kern.transform(dset, return_type="dataset")
ma_maps_dset = testdata_cbma.masker.transform(
dset.get_images(ids=dset.ids, imtype=kern.image_type)
)
assert isinstance(dset_from_dset, Dataset)
assert np.array_equal(ma_arr, ma_maps_arr)
assert np.array_equal(ma_arr, ma_maps_dset)
assert np.array_equal(ma_arr, ma_maps_from_dset_arr)
assert np.array_equal(ma_arr, ma_arr_from_dset)
def test_mkdakernel_smoke(testdata_cbma):
"""
Smoke test for nimare.meta.kernel.MKDAKernel, using Dataset object.
"""
kern = kernel.MKDAKernel()
ma_maps = kern.transform(testdata_cbma, return_type="image")
assert len(ma_maps) == len(testdata_cbma.ids)
ma_maps = kern.transform(testdata_cbma.coordinates, testdata_cbma.masker, return_type="array")
assert ma_maps.shape[0] == len(testdata_cbma.ids)
def test_mkdakernel_1mm(testdata_cbma):
"""
COMs of MKDA kernel maps should match the foci fed in (assuming focus isn't
masked out and spheres don't overlap).
Test on 1mm template.
"""
id_ = "pain_01.nidm-1"
kern = kernel.MKDAKernel(r=4, value=1)
ma_maps = kern.transform(testdata_cbma.coordinates, testdata_cbma.masker, return_type="image")
ijk = testdata_cbma.coordinates.loc[testdata_cbma.coordinates["id"] == id_, ["i", "j", "k"]]
ijk = np.squeeze(ijk.values.astype(int))
kern_data = ma_maps[0].get_fdata()
com = np.array(center_of_mass(kern_data)).astype(int).T
com = np.squeeze(com)
assert np.array_equal(ijk, com)
def test_mkdakernel_2mm(testdata_cbma):
"""
COMs of MKDA kernel maps should match the foci fed in (assuming focus isn't
masked out and spheres don't overlap).
Test on 2mm template.
"""
id_ = "pain_01.nidm-1"
kern = kernel.MKDAKernel(r=4, value=1)
ma_maps = kern.transform(testdata_cbma.coordinates, testdata_cbma.masker, return_type="image")
ijk = testdata_cbma.coordinates.loc[testdata_cbma.coordinates["id"] == id_, ["i", "j", "k"]]
ijk = np.squeeze(ijk.values.astype(int))
kern_data = ma_maps[0].get_fdata()
com = np.array(center_of_mass(kern_data)).astype(int).T
com = np.squeeze(com)
assert np.array_equal(ijk, com)
def test_mkdakernel_inputdataset_returndataset(testdata_cbma, tmp_path_factory):
"""
Check that the different return types produce equivalent results
(minus the masking element).
"""
tmpdir = tmp_path_factory.mktemp("test_mkdakernel_inputdataset_returndataset")
testdata_cbma.update_path(tmpdir)
kern = kernel.MKDAKernel(r=4, value=1)
ma_maps = kern.transform(testdata_cbma, return_type="image")
ma_arr = kern.transform(testdata_cbma, return_type="array")
dset = kern.transform(testdata_cbma, return_type="dataset")
ma_maps_from_dset = kern.transform(dset, return_type="image")
ma_arr_from_dset = kern.transform(dset, return_type="array")
dset_from_dset = kern.transform(dset, return_type="dataset")
ma_maps_arr = testdata_cbma.masker.transform(ma_maps)
ma_maps_from_dset_arr = dset.masker.transform(ma_maps_from_dset)
ma_maps_dset = testdata_cbma.masker.transform(
dset.get_images(ids=dset.ids, imtype=kern.image_type)
)
assert isinstance(dset_from_dset, Dataset)
assert np.array_equal(ma_arr, ma_maps_arr)
assert np.array_equal(ma_arr, ma_maps_dset)
assert np.array_equal(ma_arr, ma_maps_from_dset_arr)
assert np.array_equal(ma_arr, ma_arr_from_dset)
def test_kdakernel_smoke(testdata_cbma):
"""
Smoke test for nimare.meta.kernel.KDAKernel
"""
kern = kernel.KDAKernel()
ma_maps = kern.transform(testdata_cbma.coordinates, testdata_cbma.masker, return_type="image")
assert len(ma_maps) == len(testdata_cbma.ids)
ma_maps = kern.transform(testdata_cbma.coordinates, testdata_cbma.masker, return_type="array")
assert ma_maps.shape[0] == len(testdata_cbma.ids)
def test_kdakernel_1mm(testdata_cbma):
"""
COMs of KDA kernel maps should match the foci fed in (assuming focus isn't
masked out and spheres don't overlap).
Test on 1mm template.
"""
id_ = "pain_01.nidm-1"
kern = kernel.KDAKernel(r=4, value=1)
ma_maps = kern.transform(testdata_cbma.coordinates, testdata_cbma.masker, return_type="image")
ijk = testdata_cbma.coordinates.loc[testdata_cbma.coordinates["id"] == id_, ["i", "j", "k"]]
ijk = np.squeeze(ijk.values.astype(int))
kern_data = ma_maps[0].get_fdata()
com = np.array(center_of_mass(kern_data)).astype(int).T
    com = np.squeeze(com)
# -*- coding: utf-8 -*-
# Unit tests for ppn functions
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow as tf
from faster_particles.ppn_utils import generate_anchors, \
top_R_pixels, clip_pixels, \
compute_positives_ppn1, compute_positives_ppn2, assign_gt_pixels, \
include_gt_pixels, predicted_pixels, crop_pool_layer, \
all_combinations, slice_rois, \
nms_step, nms
def generate_anchors_np(im_shape, repeat=1):
dim = len(im_shape)
anchors = np.indices(im_shape).transpose(tuple(range(1, dim+1)) + (0,))
anchors = anchors + 0.5
anchors = np.reshape(anchors, (-1, dim))
return np.repeat(anchors, repeat, axis=0)
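# Illustrative example: generate_anchors_np((2, 2)) returns the pixel centres
# [[0.5, 0.5], [0.5, 1.5], [1.5, 0.5], [1.5, 1.5]].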
def clip_pixels_np(pixels, im_shape):
"""
pixels shape: [None, 2]
Clip pixels (x, y) to [0, im_shape[0]) x [0, im_shape[1])
"""
dim = len(im_shape)
for i in range(dim):
pixels[:, i] = np.clip(pixels[:, i], 0, im_shape[i])
return pixels
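# Illustrative example: clip_pixels_np(np.array([[-0.5, 3.4]]), (3, 3)) gives
# [[0.0, 3.0]], since np.clip clamps each coordinate into [0, im_shape[i]].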
class Test(unittest.TestCase):
def generate_anchors(self, im_shape, repeat):
anchors_np = generate_anchors_np(im_shape, repeat=repeat)
with tf.Session():
anchors_tf = generate_anchors(im_shape, repeat=repeat)
return np.array_equal(anchors_tf, anchors_np)
def test_generate_anchors_2d(self):
im_shape = (2, 2)
repeat = 3
return self.generate_anchors(im_shape, repeat)
def test_generate_anchors_3d(self):
im_shape = (2, 2, 2)
repeat = 3
return self.generate_anchors(im_shape, repeat)
def clip_pixels(self, im_shape, proposals_np):
pixels_np = clip_pixels_np(proposals_np, im_shape)
with tf.Session() as sess:
proposals = tf.constant(proposals_np, dtype=tf.float32)
pixels = clip_pixels(proposals, im_shape)
pixels_tf = sess.run(pixels)
return np.allclose(pixels_np, pixels_tf)
def test_clip_pixels_2d(self):
im_shape = (3, 3)
proposals_np = np.array([[-0.5, 1.0], [0.01, 3.4], [2.5, 2.99]])
return self.clip_pixels(im_shape, proposals_np)
def test_clip_pixels_3d(self):
im_shape = (2, 2, 2)
proposals_np = np.random.rand(5, 3)*4-1
return self.clip_pixels(im_shape, proposals_np)
def top_R_pixels(self, R, threshold, proposals_np, scores_np):
threshold_indices = np.nonzero(scores_np > threshold)
scores_np = scores_np[threshold_indices]
proposals_np = proposals_np[threshold_indices]
sorted_indices = np.argsort(scores_np)
roi_scores_np = scores_np[sorted_indices][::-1][:R]
rois_np = proposals_np[sorted_indices][::-1][:R]
with tf.Session() as sess:
proposals = tf.constant(proposals_np, dtype=tf.float32)
scores = tf.constant(scores_np, dtype=tf.float32)
rois, roi_scores = top_R_pixels(proposals, scores, R=R, threshold=threshold)
rois_tf, roi_scores_tf = sess.run([rois, roi_scores])
return np.allclose(rois_tf, rois_np) and np.allclose(roi_scores_np, roi_scores_tf)
def test_top_R_pixels_2d(self):
R = 3
threshold = 0.5
# Shape N*N x 2
proposals_np = np.array([[0.0, 1.0], [0.5, 0.7], [0.3, 0.88], [-0.2, 0.76], [0.23, 0.47], [0.33, 0.56], [0.0, 0.4], [-0.6, 0.3], [0.27, -0.98]])
# Shape N*N x 1
scores_np = np.array([0.1, 0.5, 0.7, 0.45, 0.65, 0.01, 0.78, 0.98, 0.72])
return self.top_R_pixels(R, threshold, proposals_np, scores_np)
def test_top_R_pixels_3d(self):
R = 3
threshold = 0.5
# shape N*N x 3
proposals_np = np.array([[0.0, 1.0, 0.3], [0.87, 0.1, -0.34], [0.45, 0.68, 0.09],
[0.34, 0.21, -0.6], [0.12, -0.4, 0.8], [0.48, 0.43, -0.79], [0.89, 0.05, -0.02], [0.9, 0.04, 1.0]])
# shape N*N x 1
scores_np = np.array([0.1, 0.5, 0.7, 0.45, 0.65, 0.01, 0.78, 0.98])
return self.top_R_pixels(R, threshold, proposals_np, scores_np)
def predicted_pixels(self, im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np):
dim = len(im_shape)
anchors_np = generate_anchors_np(im_shape, repeat=repeat)
scores = rpn_cls_prob_np[..., 1:]
roi_scores_np = np.reshape(scores, (-1, scores.shape[-1]))
anchors_np = np.reshape(anchors_np, (-1,) + (rpn_cls_prob_np.shape[1],) * dim + (dim,))
proposals = anchors_np + rpn_bbox_pred_np
proposals = np.reshape(proposals, (-1, dim))
# clip predicted pixels to the image
proposals = clip_pixels_np(proposals, im_shape) # FIXME np function
rois_np = proposals.astype(float)
with tf.Session() as sess:
anchors_tf = generate_anchors(im_shape, repeat=repeat)
rpn_cls_prob_tf = tf.constant(rpn_cls_prob_np, dtype=tf.float32)
rpn_bbox_pred_tf = tf.constant(rpn_bbox_pred_np, dtype=tf.float32)
rois, roi_scores = predicted_pixels(rpn_cls_prob_tf, rpn_bbox_pred_tf, anchors_tf, im_shape)
rois_tf, roi_scores_tf = sess.run([rois, roi_scores])
return np.allclose(rois_tf, rois_np) and np.allclose(roi_scores_tf, roi_scores_np)
def test_predicted_pixels1_2d(self): # for PPN1
im_shape = (2, 2)
repeat = 1
# Shape [None, N, N, n] where n = 2 (background/signal)
rpn_cls_prob_np = np.array([[[[0.1, 0.9], [0.3, 0.7]], [[0.5, 0.5], [0.8, 0.2]]]])
# Shape [None, N, N, 2]
rpn_bbox_pred_np = np.array([[[[0.1, 0.1], [0.5, 0.2]], [[0.9, -0.5], [0.1, -0.4]]]])
return self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np)
def test_predicted_pixels1_3d(self):
im_shape = (2, 2, 2)
repeat = 1
rpn_cls_prob_np = np.random.rand(1, 2, 2, 2, 2)
rpn_bbox_pred_np = np.random.rand(1, 2, 2, 2, 3)*2-1
return self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np)
def test_predicted_pixels2_2d(self): # for PPN2
im_shape = (2, 2)
repeat = 1
# Shape [None, N, N, n] where n = num_classes
rpn_cls_prob_np = np.array([[[[0.1, 0.8, 0.1], [0.3, 0.65, 0.05]], [[0.5, 0.02, 0.48], [0.8, 0.18, 0.02]]]])
# Shape [None, N, N, 2]
rpn_bbox_pred_np = np.array([[[[0.1, 0.1], [0.5, 0.2]], [[0.9, -0.5], [0.1, -0.4]]]])
return self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np)
def test_predicted_pixels2_3d(self):
im_shape = (2, 2, 2)
repeat = 1
rpn_cls_prob_np = np.random.rand(1, 2, 2, 2, 3)
rpn_bbox_pred_np = np.random.rand(1, 2, 2, 2, 3)*2-1
return self.predicted_pixels(im_shape, repeat, rpn_cls_prob_np, rpn_bbox_pred_np)
def include_gt_pixels(self, rois_np, gt_pixels_np, dim1, dim2):
dim = gt_pixels_np.shape[-1]
# convert to F3 coordinates
gt_pixels_coord = np.floor(gt_pixels_np / dim1)
# Get 3x3 pixels around this in F3
gt_pixels_coord = gt_pixels_coord[:, np.newaxis, :]
gt_pixels_coord = np.tile(gt_pixels_coord, [1, 3**dim, 1]) # shape N x 9 x 2
shifts = all_combinations(([-1, 0, 1],) * dim)
update = np.tile(shifts[np.newaxis, :, :], [gt_pixels_coord.shape[0], 1, 1])
gt_pixels_coord = gt_pixels_coord + update
gt_pixels_coord = np.reshape(gt_pixels_coord, (-1, dim)) # Shape N*9, 2
# Go back to F5 coordinates
gt_pixels_coord = gt_pixels_coord / dim2
rois_result_np = np.vstack([np.floor(rois_np), gt_pixels_coord]) # shape [None, 2]
with tf.Session() as sess:
rois_tf = tf.constant(rois_np, dtype=tf.float32)
gt_pixels_tf = tf.constant(gt_pixels_np, dtype=tf.float32)
rois_tf = include_gt_pixels(rois_tf, gt_pixels_tf, dim1, dim2)
rois_result_tf = sess.run(rois_tf)
return np.allclose(rois_result_tf, rois_result_np)
def test_include_gt_pixels_2d(self):
dim1, dim2 = 8.0, 4.0
# [None, 2] in F5 coordinates
rois_np = np.array([[0, 3], [15, 2], [3, 4], [5.6, 9.1]])
# [None, 2]
gt_pixels_np = np.array([[2.4, 2.3], [3, 4], [6.4, 1.2]])
return self.include_gt_pixels(rois_np, gt_pixels_np, dim1, dim2)
def test_include_gt_pixels_3d(self):
dim1, dim2 = 8.0, 4.0
rois_np = np.random.rand(10, 3)
gt_pixels_np = np.random.rand(4, 3)*dim1*dim2
return self.include_gt_pixels(rois_np, gt_pixels_np, dim1, dim2)
def compute_positives_ppn1(self, gt_pixels_test, N3, dim1, dim2):
        dim = gt_pixels_test.shape[-1]
classes_np = np.zeros((N3,)*dim)
gt_pixels_np = np.floor(gt_pixels_test / (dim1 * dim2)).astype(int)
gt_pixels_np = tuple(zip(*gt_pixels_np))
classes_np[gt_pixels_np] = 1.
classes_mask_np = classes_np.reshape(-1,1).astype(bool) # shape (16*16, 1)
with tf.Session() as sess:
gt_pixels_tf = tf.constant(gt_pixels_test, dtype=tf.float32)
classes_mask_tf = compute_positives_ppn1(gt_pixels_tf, N3, dim1, dim2)
classes_mask_tf = sess.run([classes_mask_tf])
return np.allclose(classes_mask_np, classes_mask_tf)
def test_compute_positives_ppn1_2d(self):
dim1, dim2, N3 = 8.0, 4.0, 16
# Dummy input for testing, num of gt pixels = N = 3
gt_pixels_test = np.array([[5.5, 7.7], [511.1, 433.3], [320, 320]])
return self.compute_positives_ppn1(gt_pixels_test, N3, dim1, dim2)
def test_compute_positives_ppn1_3d(self):
dim1, dim2, N3 = 8.0, 4.0, 16
gt_pixels_test = np.array([[5.5, 7.7, 45.9], [511.1, 433.3, 5.6], [320, 320, 201]])
return self.compute_positives_ppn1(gt_pixels_test, N3, dim1, dim2)
def compute_positives_ppn2(self, closest_gt_distance_test, thres_test):
pixel_count = closest_gt_distance_test.shape[0]
common_shape_np = np.array([pixel_count, 1])
mask_np = np.where(np.greater(closest_gt_distance_test, thres_test), False, True)
mask_np[np.argmin(closest_gt_distance_test)] = True
with tf.Session() as sess:
mask_tf = compute_positives_ppn2(closest_gt_distance_test, threshold=thres_test)
mask_tf = sess.run([mask_tf])
return np.allclose(mask_np, mask_tf)
def test_compute_positives_ppn2_2d(self):
nb_rois, N = 5, 16
closest_gt_distance_test = np.arange(nb_rois*N*N).reshape(-1, 1)
thres_test = 2
return self.compute_positives_ppn2(closest_gt_distance_test, thres_test)
def test_compute_positives_ppn2_3d(self):
nb_rois, N = 5, 16
closest_gt_distance_test = np.arange(nb_rois*N*N*N).reshape(-1, 1)
thres_test = 2
return self.compute_positives_ppn2(closest_gt_distance_test, thres_test)
# TODO test rois option too
def assign_gt_pixels(self, gt_pixels_np, proposals_np, dim1, dim2, rois=None):
dim = proposals_np.shape[-1]
gt_pixels = gt_pixels_np[:, :-1]
gt_pixels = gt_pixels[np.newaxis, :, :]
if rois is not None:
proposals = (proposals_np * dim2 * rois) * dim1
else:
proposals = proposals_np * dim1 * dim2
all_gt_pixels = np.tile(gt_pixels, [proposals_np.shape[0], 1, 1])
proposals = proposals[:, np.newaxis, :]
distances = np.sqrt(np.sum(np.power(proposals - all_gt_pixels, 2), axis=2))
closest_gt = np.argmin(distances, axis=1)
closest_gt_distance = np.amin(distances, axis=1)
gt_pixels_labels = gt_pixels_np[:, -1]
closest_gt_label = [gt_pixels_labels[i] for i in closest_gt]
with tf.Session() as sess:
gt_pixels_tf = tf.constant(gt_pixels_np, dtype=tf.float32)
proposals_tf = tf.constant(proposals_np, dtype=tf.float32)
closest_gt_tf, closest_gt_distance_tf, closest_gt_label_tf = assign_gt_pixels(gt_pixels_tf, proposals_tf, dim1, dim2, rois=rois)
closest_gt_result, closest_gt_distance_result, closest_gt_label_result = sess.run([closest_gt_tf, closest_gt_distance_tf, closest_gt_label_tf])
return np.allclose(closest_gt_result, closest_gt) and np.allclose(closest_gt_distance_result, closest_gt_distance) and np.allclose(closest_gt_label_result, closest_gt_label)
def test_assign_gt_pixels_2d(self):
dim1, dim2 = 8.0, 4.0
gt_pixels_np = np.array([[0.5, 5.6, 1], [53, 76, 2]])
proposals_np = np.array([[1.0, 1.0], [7, 75], [98, 10], [5, 34]])
return self.assign_gt_pixels(gt_pixels_np, proposals_np, dim1, dim2)
def test_assign_gt_pixels_3d(self):
dim1, dim2 = 8.0, 4.0
gt_pixels_np = np.array([[0.5, 5.6, 45, 1], [53, 76, 102, 2]])
proposals_np = np.array([[1.0, 1.0, 0.43], [7, 75, 2.3], [98, 10, 45], [5, 34, 72]])
return self.assign_gt_pixels(gt_pixels_np, proposals_np, dim1, dim2)
def crop_pool_layer(self, net, rois_np, dim2, dim):
rois = np.array(rois_np * dim2).astype(int)
nb_channels = net.shape[-1]
if dim == 2:
rois = [net[:, i[0], i[1], :] for i in rois]
elif dim == 3:
rois = [net[:, i[0], i[1], i[2], :] for i in rois]
rois = np.reshape(rois, (-1,) + (1,) * dim + (nb_channels,))
with tf.Session() as sess:
rois_tf = crop_pool_layer(tf.constant(net, dtype=tf.float32), tf.constant(rois_np, dtype=tf.float32), dim2, dim)
rois_result = sess.run(rois_tf)
return np.allclose(rois, rois_result)
def test_crop_pool_layer_2d(self):
dim2, dim = 4.0, 2
net = np.random.rand(1, 64, 64, 16)
rois_np = np.random.rand(10, 2)*16
return self.crop_pool_layer(net, rois_np, dim2, dim)
def test_crop_pool_layer_3d(self):
dim2, dim = 4.0, 3
net = np.random.rand(1, 64, 64, 64, 16)
rois_np = np.random.rand(10, 3)*16
return self.crop_pool_layer(net, rois_np, dim2, dim)
def test_all_combinations(self):
return np.allclose(all_combinations(([0, 1], [0, 1])), np.array([[0, 0], [0, 1], [1, 0], [1, 1]]))
def slice_rois(self, rois_np, dim2):
dim = rois_np.shape[-1]
rois_slice = []
for i in range(dim):
rois_slice.append(np.multiply(rois_np[:, i], dim2))
rois_slice = np.array(rois_slice)[..., np.newaxis, np.newaxis]
indices = ([-2, -1, 0, 1],) * dim
shifts = all_combinations(indices).T[:, np.newaxis, np.newaxis, :]
all_rois = np.add(rois_slice, shifts)
rois = np.reshape(all_rois, (-1, dim)) / dim2
with tf.Session() as sess:
rois_tf = slice_rois(tf.constant(rois_np, dtype=tf.float32), dim2)
rois_result = sess.run(rois_tf)
return np.allclose(rois, rois_result)
def test_slice_rois_2d(self):
dim2 = 4.0
rois_np = np.random.rand(10, 2) * 64
return self.slice_rois(rois_np, dim2)
def test_slice_rois_3d(self):
dim2 = 4.0
rois_np = np.random.rand(10, 3) * 64
return self.slice_rois(rois_np, dim2)
def test_nms_step(self):
order = np.array([1, 2, 0])
x1 = np.array([0, 2, 3])
x2 = np.array([1, 3, 4])
y1 = np.array([0, 1, 0.5])
y2 = np.array([1, 2, 1.5])
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
with tf.Session() as sess:
order = tf.constant(order, dtype=tf.int32)
x1, x2, y1, y2 = tf.constant(x1, dtype=tf.float32), tf.constant(x2, dtype=tf.float32), tf.constant(y1, dtype=tf.float32), tf.constant(y2, dtype=tf.float32)
keep = tf.Variable([0], dtype=tf.int32, name="keep")
threshold = tf.constant(0.5)
size = tf.constant(1.0)
areas = tf.constant(areas, dtype=tf.float32)
sess.run(tf.global_variables_initializer())
result = nms_step(order, x1, y1, x2, y2, areas, keep, threshold, size)
result_np = sess.run(result)
            return np.allclose(result_np[-3], np.array([0, 1]))
import os
import numpy as np
import soundfile as sf # To read .flac files.
import librosa
# For the neural network.
# Install PyBrain, e.g. pip install pybrain.
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.structure import FullConnection
from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer
from pybrain.structure import SoftmaxLayer
from pybrain.structure import TanhLayer
# Read data from a folder into a list.
def getData(division,speaker,datapath,audioType,durationCheck,deltaT,lim1,lim2,numFeatures,noisy,transform):
#deltaT is the duration of the audio frame. Lim1 & Lim2 are the frequency ranges; each frequency is a feature.
#Noisy sets the limit for pauses in speech
#Division is the data, i.e. Train, CV, test
fname = datapath+division+speaker
subPaths = [v+"/" for v in os.listdir(fname) if v[0] != "."]
dataFiles = []
for subPath in subPaths:
files = [v for v in os.listdir(fname+subPath) if v[0] != "." and audioType in v]
for fil in files:
data,samplerate = sf.read(fname+subPath+fil)
duration = len(data)*1./samplerate
if duration >= durationCheck: dataFiles.append(fname+subPath+fil)
chunksF = []
for fil in dataFiles:
data,samplerate = sf.read(fil)
duration = len(data)*1./samplerate
# Divide audio data into frames, or chunks.
numChunks = int(duration/deltaT)
sizeChunk = int(len(data)/numChunks)
for lp in range(0,numChunks):
chunk = data[lp*sizeChunk:(lp+1)*sizeChunk] # get a chunk of speech.
# np.fft.rfft computes the one-dimensional discrete Fourier Transform of the data
if transform == 'Fourier':
chunksF.append(np.abs(np.fft.rfft(chunk))[lim1:lim2]) # take the FFT.
elif transform == 'Mel':
S = librosa.feature.melspectrogram(y=chunk, sr=samplerate, n_mels=128, fmax=lim2)
chunksF.append(np.abs(S))
# Delete quiet parts of speech, i.e. pauses.
# Most of the power is in the bottom 50% of frequencies.
    mu = np.mean([np.mean(chunksF[i][:numFeatures//2]) for i in range(len(chunksF))])
import loader as ld
import fun_basicas as fun
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
from scipy.optimize import minimize
def coste(theta1, theta2, X, Y, num_etiquetas):  # Y already prepared (one-hot encoded)
A1, A2, h = forward_prop(X, theta1, theta2)
sum1 = Y * np.log(h)
sum2 = (1 - Y) * np.log(1 - h + 1e-6)
return (-1 / X.shape[0]) * np.sum(sum1 + sum2)
def coste_reg(theta1, theta2, X, Y, num_etiquetas, Lambda):
c = coste(theta1, theta2, X, Y, num_etiquetas)
m = X.shape[0]
e = sum(sum(theta1[:, 1:] ** 2)) + sum(sum(theta2[:, 1:] ** 2))
return c + (Lambda / (2 * m)) * e
def forward_prop(X, theta1, theta2):
n = X.shape[0]
    # Add a bias column of ones to the input matrix
X = np.hstack([np.ones([n, 1]), X])
    # The hidden layer applies the first weight matrix to build its activations and prepends a column of ones
Oculta = fun.sigmoide(np.dot(X, theta1.T))
Oculta = np.hstack([np.ones([n, 1]), Oculta])
    # The output is obtained by passing all hidden-layer activations through the second weight matrix
Resultado = fun.sigmoide(np.dot(Oculta, theta2.T))
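    # Shapes: X is (n, inputs + 1) with the bias column, Oculta is (n, hidden + 1),
    # and Resultado is (n, num_labels) with the output activations.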
return X, Oculta, Resultado
def gradiente(theta1, theta2, X, y):
    # Create the Delta accumulators with the same shape as each theta, initialized to zero
Delta1 = np.zeros(np.shape(theta1))
Delta2 = np.zeros(np.shape(theta2))
m = len(y)
    # Run forward propagation
A1, A2, h = forward_prop(X, theta1, theta2)
    # Back-propagate the error for each training example
for k in range(m):
a1k = A1[k, :]
a2k = A2[k, :]
a3k = h[k, :]
yk = y[k, :]
d3 = a3k - yk
g_prima = (a2k * (1 - a2k))
d2 = np.dot(theta2.T, d3) * g_prima
Delta1 = Delta1 + np.dot(d2[1:, np.newaxis], a1k[np.newaxis, :])
Delta2 = Delta2 + np.dot(d3[:, np.newaxis], a2k[np.newaxis, :])
    # Return the Deltas, which correspond to the gradient
return Delta1 / m, Delta2 / m
def gradiente_reg(theta1, theta2, X, y, Lambda):
m = len(y)
Delta1, Delta2 = gradiente(theta1, theta2, X, y)
    # Add to every gradient element (except the first, bias column) the regularization
    # term Lambda/m multiplied by the corresponding element of theta1 and theta2
Delta1[:, 1:] = Delta1[:, 1:] + (Lambda / m) * theta1[:, 1:]
Delta2[:, 1:] = Delta2[:, 1:] + (Lambda / m) * theta2[:, 1:]
return Delta1, Delta2
def backprop(params_rn, num_entradas, num_ocultas, num_etiquetas, X, y, reg):
    # backprop returns a tuple (cost, gradient) with the cost and gradient of
    # a three-layer neural network with num_entradas inputs, num_ocultas nodes
    # in the hidden layer and num_etiquetas nodes in the output layer. If m is
    # the number of training examples, 'X' has shape (m, num_entradas) and 'y'
    # has shape (m, num_etiquetas)
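    # params_rn packs both weight matrices into one flat vector (theta1 followed
    # by theta2), which is the layout scipy.optimize.minimize works with.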
theta1 = np.reshape(params_rn[:num_ocultas * (num_entradas + 1)], (num_ocultas, (num_entradas + 1)))
theta2 = np.reshape(params_rn[num_ocultas * (num_entradas + 1):], (num_etiquetas, (num_ocultas + 1)))
m = len(y)
D1, D2 = gradiente_reg(theta1, theta2, X, y, reg)
coste = coste_reg(theta1, theta2, X, y, num_etiquetas, reg)
gradiente = np.concatenate((np.ravel(D1), np.ravel(D2)))
return coste, gradiente
def prueba_neurona(X, y, theta1, theta2):
"""función que devuelve el porcentaje de acierto de una red neuronal utilizando unas matrices de pesos dadas"""
n = len(y)
y = np.ravel(y)
_, _, result = forward_prop(X, theta1, theta2)
result = np.argmax(result, axis=1)
return (sum((result + 1)%4 == y) / n * 100)
def validacion_redes(random_state, num_labels, iteraciones, hiddens, lambdas, colores = ['r', 'b' , 'g', 'm']):
Ex, Ey, Vx, Vy, Px, Py = ld.carga_Numpy(random_state)
y_onehot = fun.one_hot(Ey, 4)
input_size = Ex.shape[1]
INIT_EPSILON = 0.12
for hidden_size in hiddens:
theta1 = np.random.random((hidden_size,(input_size + 1)))*(2*INIT_EPSILON) - INIT_EPSILON
theta2 = np.random.random((num_labels,(hidden_size + 1)))*(2*INIT_EPSILON) - INIT_EPSILON
params = np.concatenate((np.ravel(theta1), np.ravel(theta2)))
plt.figure()
i = 0
for reg in lambdas:
percent = []
for iters in iteraciones:
fmin = minimize(fun=backprop, x0=params,
args=(input_size, hidden_size,
num_labels, Ex, y_onehot, reg),
method='TNC', jac=True,
options={'maxiter': iters})
theta1 = np.reshape(fmin.x[:hidden_size*(input_size + 1)],(hidden_size,(input_size + 1)))
theta2 = np.reshape(fmin.x[hidden_size * (input_size+1):],(num_labels,(hidden_size + 1)))
p = prueba_neurona(Vx, Vy, theta1, theta2)
print(p)
percent.append(p)
plt.plot(iteraciones, percent, c = colores[i] , label = ' lambda = {} '.format(reg))
i = i+1
plt.legend()
plt.title("hidden sizes: {}".format(hidden_size))
plt.show()
def prueba_redes(random_state, num_labels, iteraciones, hiddens, lambdas, colores = ['r', 'b' , 'g', 'm']):
Ex, Ey, Vx, Vy, Px, Py = ld.carga_Numpy(random_state)
y_onehot = fun.one_hot(Ey, 4)
input_size = Ex.shape[1]
INIT_EPSILON = 0.12
for hidden_size in hiddens:
theta1 = np.random.random((hidden_size,(input_size + 1)))*(2*INIT_EPSILON) - INIT_EPSILON
theta2 = np.random.random((num_labels,(hidden_size + 1)))*(2*INIT_EPSILON) - INIT_EPSILON
params = np.concatenate((np.ravel(theta1), np.ravel(theta2)))
plt.figure()
i = 0
for reg in lambdas:
percent1 = []
percent2 = []
for iters in iteraciones:
fmin = minimize(fun=backprop, x0=params,
args=(input_size, hidden_size,
num_labels, Ex, y_onehot, reg),
method='TNC', jac=True,
options={'maxiter': iters})
theta1 = np.reshape(fmin.x[:hidden_size*(input_size + 1)],(hidden_size,(input_size + 1)))
theta2 = np.reshape(fmin.x[hidden_size * (input_size+1):],(num_labels,(hidden_size + 1)))
p1 = prueba_neurona(Vx, Vy, theta1, theta2)
print("validación = {}".format(p1))
p2 = prueba_neurona(Px, Py, theta1, theta2)
print("prueba = {}".format(p2))
percent1.append(p1)
percent2.append(p2)
            plt.plot(iteraciones, percent1, c = colores[i] , label = 'validation')
            plt.plot(iteraciones, percent2, c = colores[i + 1] , label = 'test')
i = i+1
plt.legend()
plt.title("hidden sizes: {}".format(hidden_size))
plt.show()
#### For networks with two hidden layers (a version for an arbitrary number of hidden layers will follow)
def coste2(theta1, theta2, theta3, X, Y, num_etiquetas):  # Y already prepared (one-hot encoded)
A1, A2, A3, h = forward_prop2(X, theta1, theta2, theta3)
sum1 = Y * np.log(h)
sum2 = (1 - Y) * np.log(1 - h + 1e-6)
return (-1 / X.shape[0]) * np.sum(sum1 + sum2)
def coste_reg2(theta1, theta2, theta3, X, Y, num_etiquetas, Lambda):
c = coste2(theta1, theta2, theta3, X, Y, num_etiquetas)
m = X.shape[0]
e = sum(sum(theta1[:, 1:] ** 2)) + sum(sum(theta2[:, 1:] ** 2)) + sum(sum(theta3[:, 1:] ** 2))
return c + (Lambda / (2 * m)) * e
def forward_prop2(X, theta1, theta2, theta3):
n = X.shape[0]
# Se añade una fila de unos a la matriz inicial
X = np.hstack([np.ones([n, 1]), X])
# Las capas ocultas utilizan la primera y segunda matrices de pesos para crear sus neuronas y les añaden una fila de unos
    Oculta1 = fun.sigmoide(np.dot(X, theta1.T))
import math
import numpy as np
import matplotlib.pyplot as plt
STATE_NUM = 1000
GROUP_NUM = 10
RUNS = 100
START = 500
END_L = 0
END_R = 1001
ALPHA = 0.8
class RandWalk_1000L:
def __init__(self, _alpha=ALPHA, _runs=RUNS, _state_num=STATE_NUM):
self.alpha = _alpha
self.runs = _runs
self.state_num = _state_num
self.state = START
        self.weights = np.zeros(GROUP_NUM)
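        # One weight per group: the STATE_NUM = 1000 states are aggregated into
        # GROUP_NUM = 10 groups of 100 states each (state aggregation).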
import numpy as np
import pytest
from autolens.data.array import mask
from autolens.data.array import interpolation
from autolens.model.galaxy import galaxy
from autolens.model.profiles import mass_profiles
@pytest.fixture(name='scheme')
def make_scheme():
return interpolation.InterpolationScheme(shape=(3, 3), image_coords=np.array([[1.0, 1.0]]), image_pixel_scale=1.0)
@pytest.fixture(name='geometry')
def make_geometry():
return interpolation.InterpolationGeometry(y_min=-1.0, y_max=1.0, x_min=-1.0, x_max=1.0,
y_pixel_scale=1.0, x_pixel_scale=1.0)
@pytest.fixture(name='galaxy_no_profiles', scope='function')
def make_galaxy_no_profiles():
return galaxy.Galaxy()
@pytest.fixture(name="galaxy_mass_sis")
def make_galaxy_mass_sis():
sis = mass_profiles.SphericalIsothermal(einstein_radius=1.0)
return galaxy.Galaxy(mass_profile=sis)
class TestInterpolationScheme(object):
class TestConstructor:
def test__sets_up_attributes_correctly(self):
image_coords = np.array([[-1.0, -6.0], [-1.0, 0.0], [-4.0, 2.0],
[-0.0, -1.0], [0.0, 0.0], [0.0, 1.0],
[3.0, -1.0], [1.0, 0.0], [1.0, 1.0]])
interp = interpolation.InterpolationScheme(shape=(3, 3), image_coords=image_coords, image_pixel_scale=1.0)
assert interp.shape == (3, 3)
assert interp.pixels == 9
assert (interp.image_coords == image_coords).all()
assert interp.geometry.y_min == -6.0
assert interp.geometry.y_max == 2.0
assert interp.geometry.x_min == -4.0
assert interp.geometry.x_max == 3.0
assert interp.geometry.y_pixel_scale == 1.0
assert interp.geometry.x_pixel_scale == 1.0
assert interp.geometry.x_size == 7.0
assert interp.geometry.y_size == 8.0
assert interp.geometry.x_start == -4.5
assert interp.geometry.y_start == -6.5
class TestNeighbors:
def test___3x3_grid_neighbors_all_correct(self):
# |0|1|2|
# |3|4|5|
# |6|7|8|
interp = interpolation.InterpolationScheme(shape=(3, 3), image_coords=np.array([[1.0, 1.0]]),
image_pixel_scale=1.0)
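            # Each neighbor array lists, per pixel, the indices of its three neighbors
            # in that diagonal direction; -1 marks neighbors outside the grid.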
assert (interp.bottom_right_neighbors[0] == np.array([1, 3, 4])).all()
assert (interp.bottom_right_neighbors[1] == np.array([2, 4, 5])).all()
assert (interp.bottom_right_neighbors[2] == np.array([-1, 5, -1])).all()
assert (interp.bottom_right_neighbors[3] == np.array([4, 6, 7])).all()
assert (interp.bottom_right_neighbors[4] == np.array([5, 7, 8])).all()
assert (interp.bottom_right_neighbors[5] == np.array([-1, 8, -1])).all()
assert (interp.bottom_right_neighbors[6] == np.array([7, -1, -1])).all()
assert (interp.bottom_right_neighbors[7] == np.array([8, -1, -1])).all()
assert (interp.bottom_right_neighbors[8] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[0] == np.array([-1, -1, 3])).all()
assert (interp.bottom_left_neighbors[1] == np.array([0, 3, 4])).all()
assert (interp.bottom_left_neighbors[2] == np.array([1, 4, 5])).all()
assert (interp.bottom_left_neighbors[3] == np.array([-1, -1, 6])).all()
assert (interp.bottom_left_neighbors[4] == np.array([3, 6, 7])).all()
assert (interp.bottom_left_neighbors[5] == np.array([4, 7, 8])).all()
assert (interp.bottom_left_neighbors[6] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[7] == np.array([6, -1, -1])).all()
assert (interp.bottom_left_neighbors[8] == np.array([7, -1, -1])).all()
assert (interp.top_right_neighbors[0] == np.array([-1, -1, 1])).all()
assert (interp.top_right_neighbors[1] == np.array([-1, -1, 2])).all()
assert (interp.top_right_neighbors[2] == np.array([-1, -1, -1])).all()
assert (interp.top_right_neighbors[3] == np.array([0, 1, 4])).all()
assert (interp.top_right_neighbors[4] == np.array([1, 2, 5])).all()
assert (interp.top_right_neighbors[5] == np.array([2, -1, -1])).all()
assert (interp.top_right_neighbors[6] == np.array([3, 4, 7])).all()
assert (interp.top_right_neighbors[7] == np.array([4, 5, 8])).all()
assert (interp.top_right_neighbors[8] == np.array([5, -1, -1])).all()
assert (interp.top_left_neighbors[0] == np.array([-1, -1, -1])).all()
assert (interp.top_left_neighbors[1] == np.array([-1, -1, 0])).all()
assert (interp.top_left_neighbors[2] == np.array([-1, -1, 1])).all()
assert (interp.top_left_neighbors[3] == np.array([-1, 0, -1])).all()
assert (interp.top_left_neighbors[4] == np.array([0, 1, 3])).all()
assert (interp.top_left_neighbors[5] == np.array([1, 2, 4])).all()
assert (interp.top_left_neighbors[6] == np.array([-1, 3, -1])).all()
assert (interp.top_left_neighbors[7] == np.array([3, 4, 6])).all()
assert (interp.top_left_neighbors[8] == np.array([4, 5, 7])).all()
def test___3x4_grid_neighbors_all_correct(self):
# |0|1| 2| 3|
# |4|5| 6| 7|
# |8|9|10|11|
interp = interpolation.InterpolationScheme(shape=(3, 4), image_coords=np.array([[1.0, 1.0]]),
image_pixel_scale=1.0)
assert (interp.bottom_right_neighbors[0] == np.array([1, 4, 5])).all()
assert (interp.bottom_right_neighbors[1] == np.array([2, 5, 6])).all()
assert (interp.bottom_right_neighbors[2] == np.array([3, 6, 7])).all()
assert (interp.bottom_right_neighbors[3] == np.array([-1, 7, -1])).all()
assert (interp.bottom_right_neighbors[4] == np.array([5, 8, 9])).all()
assert (interp.bottom_right_neighbors[5] == np.array([6, 9, 10])).all()
assert (interp.bottom_right_neighbors[6] == np.array([7, 10, 11])).all()
assert (interp.bottom_right_neighbors[7] == np.array([-1, 11, -1])).all()
assert (interp.bottom_right_neighbors[8] == np.array([9, -1, -1])).all()
assert (interp.bottom_right_neighbors[9] == np.array([10, -1, -1])).all()
assert (interp.bottom_right_neighbors[10] == np.array([11, -1, -1])).all()
assert (interp.bottom_right_neighbors[11] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[0] == np.array([-1, -1, 4])).all()
assert (interp.bottom_left_neighbors[1] == np.array([0, 4, 5])).all()
assert (interp.bottom_left_neighbors[2] == np.array([1, 5, 6])).all()
assert (interp.bottom_left_neighbors[3] == np.array([2, 6, 7])).all()
assert (interp.bottom_left_neighbors[4] == np.array([-1, -1, 8])).all()
assert (interp.bottom_left_neighbors[5] == np.array([4, 8, 9])).all()
assert (interp.bottom_left_neighbors[6] == np.array([5, 9, 10])).all()
assert (interp.bottom_left_neighbors[7] == np.array([6, 10, 11])).all()
assert (interp.bottom_left_neighbors[8] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[9] == np.array([8, -1, -1])).all()
assert (interp.bottom_left_neighbors[10] == np.array([9, -1, -1])).all()
assert (interp.bottom_left_neighbors[11] == np.array([10, -1, -1])).all()
assert (interp.top_right_neighbors[0] == np.array([-1, -1, 1])).all()
assert (interp.top_right_neighbors[1] == np.array([-1, -1, 2])).all()
assert (interp.top_right_neighbors[2] == np.array([-1, -1, 3])).all()
assert (interp.top_right_neighbors[3] == np.array([-1, -1, -1])).all()
assert (interp.top_right_neighbors[4] == np.array([0, 1, 5])).all()
assert (interp.top_right_neighbors[5] == np.array([1, 2, 6])).all()
assert (interp.top_right_neighbors[6] == np.array([2, 3, 7])).all()
assert (interp.top_right_neighbors[7] == np.array([3, -1, -1])).all()
assert (interp.top_right_neighbors[8] == np.array([4, 5, 9])).all()
assert (interp.top_right_neighbors[9] == np.array([5, 6, 10])).all()
assert (interp.top_right_neighbors[10] == np.array([6, 7, 11])).all()
assert (interp.top_right_neighbors[11] == np.array([7, -1, -1])).all()
assert (interp.top_left_neighbors[0] == np.array([-1, -1, -1])).all()
assert (interp.top_left_neighbors[1] == np.array([-1, -1, 0])).all()
assert (interp.top_left_neighbors[2] == np.array([-1, -1, 1])).all()
assert (interp.top_left_neighbors[3] == np.array([-1, -1, 2])).all()
assert (interp.top_left_neighbors[4] == np.array([-1, 0, -1])).all()
assert (interp.top_left_neighbors[5] == np.array([0, 1, 4])).all()
assert (interp.top_left_neighbors[6] == np.array([1, 2, 5])).all()
assert (interp.top_left_neighbors[7] == np.array([2, 3, 6])).all()
assert (interp.top_left_neighbors[8] == np.array([-1, 4, -1])).all()
assert (interp.top_left_neighbors[9] == np.array([4, 5, 8])).all()
assert (interp.top_left_neighbors[10] == np.array([5, 6, 9])).all()
assert (interp.top_left_neighbors[11] == np.array([6, 7, 10])).all()
def test___4x3_grid_neighbors_all_correct(self):
# |0| 1| 2|
# |3| 4| 5|
# |6| 7| 8|
# |9|10|11|
interp = interpolation.InterpolationScheme(shape=(4, 3), image_coords=np.array([[1.0, 1.0]]),
image_pixel_scale=1.0)
assert (interp.bottom_right_neighbors[0] == np.array([1, 3, 4])).all()
assert (interp.bottom_right_neighbors[1] == np.array([2, 4, 5])).all()
assert (interp.bottom_right_neighbors[2] == np.array([-1, 5, -1])).all()
assert (interp.bottom_right_neighbors[3] == np.array([4, 6, 7])).all()
assert (interp.bottom_right_neighbors[4] == np.array([5, 7, 8])).all()
assert (interp.bottom_right_neighbors[5] == np.array([-1, 8, -1])).all()
assert (interp.bottom_right_neighbors[6] == np.array([7, 9, 10])).all()
assert (interp.bottom_right_neighbors[7] == np.array([8, 10, 11])).all()
assert (interp.bottom_right_neighbors[8] == np.array([-1, 11, -1])).all()
assert (interp.bottom_right_neighbors[9] == np.array([10, -1, -1])).all()
assert (interp.bottom_right_neighbors[10] == np.array([11, -1, -1])).all()
assert (interp.bottom_right_neighbors[11] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[0] == np.array([-1, -1, 3])).all()
assert (interp.bottom_left_neighbors[1] == np.array([0, 3, 4])).all()
assert (interp.bottom_left_neighbors[2] == np.array([1, 4, 5])).all()
assert (interp.bottom_left_neighbors[3] == np.array([-1, -1, 6])).all()
assert (interp.bottom_left_neighbors[4] == np.array([3, 6, 7])).all()
assert (interp.bottom_left_neighbors[5] == np.array([4, 7, 8])).all()
assert (interp.bottom_left_neighbors[6] == np.array([-1, -1, 9])).all()
assert (interp.bottom_left_neighbors[7] == np.array([6, 9, 10])).all()
assert (interp.bottom_left_neighbors[8] == np.array([7, 10, 11])).all()
assert (interp.bottom_left_neighbors[9] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[10] == np.array([9, -1, -1])).all()
assert (interp.bottom_left_neighbors[11] == np.array([10, -1, -1])).all()
assert (interp.top_right_neighbors[0] == np.array([-1, -1, 1])).all()
assert (interp.top_right_neighbors[1] == np.array([-1, -1, 2])).all()
assert (interp.top_right_neighbors[2] == np.array([-1, -1, -1])).all()
assert (interp.top_right_neighbors[3] == np.array([0, 1, 4])).all()
assert (interp.top_right_neighbors[4] == np.array([1, 2, 5])).all()
assert (interp.top_right_neighbors[5] == np.array([2, -1, -1])).all()
assert (interp.top_right_neighbors[6] == np.array([3, 4, 7])).all()
assert (interp.top_right_neighbors[7] == np.array([4, 5, 8])).all()
assert (interp.top_right_neighbors[8] == np.array([5, -1, -1])).all()
assert (interp.top_right_neighbors[9] == np.array([6, 7, 10])).all()
assert (interp.top_right_neighbors[10] == np.array([7, 8, 11])).all()
assert (interp.top_right_neighbors[11] == np.array([8, -1, -1])).all()
assert (interp.top_left_neighbors[0] == np.array([-1, -1, -1])).all()
assert (interp.top_left_neighbors[1] == np.array([-1, -1, 0])).all()
assert (interp.top_left_neighbors[2] == np.array([-1, -1, 1])).all()
assert (interp.top_left_neighbors[3] == np.array([-1, 0, -1])).all()
assert (interp.top_left_neighbors[4] == np.array([0, 1, 3])).all()
assert (interp.top_left_neighbors[5] == np.array([1, 2, 4])).all()
assert (interp.top_left_neighbors[6] == np.array([-1, 3, -1])).all()
assert (interp.top_left_neighbors[7] == np.array([3, 4, 6])).all()
assert (interp.top_left_neighbors[8] == np.array([4, 5, 7])).all()
assert (interp.top_left_neighbors[9] == np.array([-1, 6, -1])).all()
assert (interp.top_left_neighbors[10] == np.array([6, 7, 9])).all()
assert (interp.top_left_neighbors[11] == np.array([7, 8, 10])).all()
def test___4x4_grid_neighbors_all_correct(self):
# | 0| 1| 2| 3|
# | 4| 5| 6| 7|
# | 8| 9|10|11|
# |12|13|14|15|
interp = interpolation.InterpolationScheme(shape=(4, 4), image_coords=np.array([[1.0, 1.0]]),
image_pixel_scale=1.0)
assert (interp.bottom_right_neighbors[0] == np.array([1, 4, 5])).all()
assert (interp.bottom_right_neighbors[1] == np.array([2, 5, 6])).all()
assert (interp.bottom_right_neighbors[2] == np.array([3, 6, 7])).all()
assert (interp.bottom_right_neighbors[3] == np.array([-1, 7, -1])).all()
assert (interp.bottom_right_neighbors[4] == np.array([5, 8, 9])).all()
assert (interp.bottom_right_neighbors[5] == np.array([6, 9, 10])).all()
assert (interp.bottom_right_neighbors[6] == np.array([7, 10, 11])).all()
assert (interp.bottom_right_neighbors[7] == np.array([-1, 11, -1])).all()
assert (interp.bottom_right_neighbors[8] == np.array([9, 12, 13])).all()
assert (interp.bottom_right_neighbors[9] == np.array([10, 13, 14])).all()
assert (interp.bottom_right_neighbors[10] == np.array([11, 14, 15])).all()
assert (interp.bottom_right_neighbors[11] == np.array([-1, 15, -1])).all()
assert (interp.bottom_right_neighbors[12] == np.array([13, -1, -1])).all()
assert (interp.bottom_right_neighbors[13] == np.array([14, -1, -1])).all()
assert (interp.bottom_right_neighbors[14] == np.array([15, -1, -1])).all()
assert (interp.bottom_right_neighbors[15] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[0] == np.array([-1, -1, 4])).all()
assert (interp.bottom_left_neighbors[1] == np.array([0, 4, 5])).all()
assert (interp.bottom_left_neighbors[2] == np.array([1, 5, 6])).all()
assert (interp.bottom_left_neighbors[3] == np.array([2, 6, 7])).all()
assert (interp.bottom_left_neighbors[4] == np.array([-1, -1, 8])).all()
assert (interp.bottom_left_neighbors[5] == np.array([4, 8, 9])).all()
assert (interp.bottom_left_neighbors[6] == np.array([5, 9, 10])).all()
assert (interp.bottom_left_neighbors[7] == np.array([6, 10, 11])).all()
assert (interp.bottom_left_neighbors[8] == np.array([-1, -1, 12])).all()
assert (interp.bottom_left_neighbors[9] == np.array([8, 12, 13])).all()
assert (interp.bottom_left_neighbors[10] == np.array([9, 13, 14])).all()
assert (interp.bottom_left_neighbors[11] == np.array([10, 14, 15])).all()
assert (interp.bottom_left_neighbors[12] == np.array([-1, -1, -1])).all()
assert (interp.bottom_left_neighbors[13] == np.array([12, -1, -1])).all()
assert (interp.bottom_left_neighbors[14] == np.array([13, -1, -1])).all()
assert (interp.bottom_left_neighbors[15] == np.array([14, -1, -1])).all()
assert (interp.top_right_neighbors[0] == np.array([-1, -1, 1])).all()
assert (interp.top_right_neighbors[1] == np.array([-1, -1, 2])).all()
assert (interp.top_right_neighbors[2] == np.array([-1, -1, 3])).all()
assert (interp.top_right_neighbors[3] == np.array([-1, -1, -1])).all()
assert (interp.top_right_neighbors[4] == np.array([0, 1, 5])).all()
assert (interp.top_right_neighbors[5] == np.array([1, 2, 6])).all()
assert (interp.top_right_neighbors[6] == np.array([2, 3, 7])).all()
assert (interp.top_right_neighbors[7] == np.array([3, -1, -1])).all()
assert (interp.top_right_neighbors[8] == np.array([4, 5, 9])).all()
assert (interp.top_right_neighbors[9] == np.array([5, 6, 10])).all()
assert (interp.top_right_neighbors[10] == np.array([6, 7, 11])).all()
assert (interp.top_right_neighbors[11] == np.array([7, -1, -1])).all()
assert (interp.top_right_neighbors[12] == np.array([8, 9, 13])).all()
assert (interp.top_right_neighbors[13] == np.array([9, 10, 14])).all()
assert (interp.top_right_neighbors[14] == np.array([10, 11, 15])).all()
assert (interp.top_right_neighbors[15] == np.array([11, -1, -1])).all()
assert (interp.top_left_neighbors[0] == np.array([-1, -1, -1])).all()
assert (interp.top_left_neighbors[1] == np.array([-1, -1, 0])).all()
assert (interp.top_left_neighbors[2] == np.array([-1, -1, 1])).all()
assert (interp.top_left_neighbors[3] == np.array([-1, -1, 2])).all()
assert (interp.top_left_neighbors[4] == np.array([-1, 0, -1])).all()
assert (interp.top_left_neighbors[5] == np.array([0, 1, 4])).all()
assert (interp.top_left_neighbors[6] == np.array([1, 2, 5])).all()
assert (interp.top_left_neighbors[7] == np.array([2, 3, 6])).all()
assert (interp.top_left_neighbors[8] == np.array([-1, 4, -1])).all()
assert (interp.top_left_neighbors[9] == np.array([4, 5, 8])).all()
assert (interp.top_left_neighbors[10] == np.array([5, 6, 9])).all()
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from astropy.table import Table, join
from os import chdir, system
from scipy.stats import norm as gauss_norm
from scipy.interpolate import splrep, splev  # needed by the 'spline' branch of fit_abund_trend/eval_abund_trend
from sys import argv
from getopt import getopt
# turn off polyfit ranking warnings
import warnings
warnings.filterwarnings('ignore')
def _prepare_pdf_data(means, stds, range, norm=True):
x_vals = np.linspace(range[0], range[1], 250)
y_vals = np.zeros_like(x_vals)
# create and sum all PDF of stellar abundances
for d_m, d_s in zip(means, stds):
if np.isfinite([d_m, d_s]).all():
y_vals += gauss_norm.pdf(x_vals, loc=d_m, scale=d_s)
# return normalized summed pdf of all stars
if norm and np.nansum(y_vals) > 0.:
y_vals = 1. * y_vals/np.nanmax(y_vals)
return x_vals, y_vals
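# Minimal usage sketch for _prepare_pdf_data (the input values below are made up):
# every star contributes a Gaussian centred on its abundance with its uncertainty as
# the width; non-finite entries are skipped and the summed curve is rescaled to a
# peak of 1 so different samples can be over-plotted. Not called by the script.
def _example_prepare_pdf_data():
    star_means = np.array([0.05, 0.10, 0.12, np.nan])  # per-star abundance means
    star_stds = np.array([0.02, 0.03, 0.02, 0.01])     # per-star uncertainties
    x_vals, y_vals = _prepare_pdf_data(star_means, star_stds, range=(-0.3, 0.5))
    assert np.isclose(np.nanmax(y_vals), 1.)            # normalised to unit peak
    return x_vals, y_vals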
def _prepare_hist_data(d, bins, range, norm=True):
heights, edges = np.histogram(d, bins=bins, range=range)
width = np.abs(edges[0] - edges[1])
if norm:
heights = 1.*heights / np.nanmax(heights)
return edges[:-1], heights, width
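# Minimal sketch of how the histogram helper pairs with a matplotlib bar plot
# (synthetic data, figure not saved anywhere). Not called by the script.
def _example_hist_bar():
    vals = np.random.RandomState(3).normal(0., 0.1, 500)
    edges, heights, width = _prepare_hist_data(vals, bins=40, range=(-0.45, 0.45))
    fig, ax = plt.subplots()
    ax.bar(edges, heights, width=width, align='edge', alpha=0.5)  # edges are left bin edges
    return fig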
def _evaluate_abund_trend_fit(orig, fit, idx, sigma_low, sigma_high):
# difference to the original data
diff = orig - fit
std_diff = np.nanstd(diff[idx])
# select data that will be fitted
idx_outlier = np.logical_or(diff < (-1. * std_diff * sigma_low),
diff > (std_diff * sigma_high))
return np.logical_and(idx, ~idx_outlier)
def fit_abund_trend(p_data, a_data,
steps=3, sigma_low=2.5, sigma_high=2.5,
order=5, window=10, n_min_perc=10.,func='poly'):
idx_fit = np.logical_and(np.isfinite(p_data), np.isfinite(a_data))
data_len = np.sum(idx_fit)
n_fit_points_prev = np.sum(idx_fit)
if data_len <= order + 1:
return None, None
p_offset = np.nanmedian(p_data)
for i_f in range(steps): # number of sigma clipping steps
if func == 'cheb':
coef = np.polynomial.chebyshev.chebfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
coef = np.polynomial.legendre.legfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
coef = np.polyfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order)
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
coef = splrep(p_data[idx_fit] - p_offset, a_data[idx_fit], k=order, s=window)
f_data = splev(p_data - p_offset, coef)
idx_fit = _evaluate_abund_trend_fit(a_data, f_data, idx_fit, sigma_low, sigma_high)
n_fit_points = np.sum(idx_fit)
if 100.*n_fit_points/data_len < n_min_perc:
break
if n_fit_points == n_fit_points_prev:
break
else:
n_fit_points_prev = n_fit_points
a_std = np.nanstd(a_data - f_data)
return [coef, p_offset], a_std
def eval_abund_trend(p_data, m_data, func='poly'):
coef, p_offset = m_data
if func == 'cheb':
f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef)
if func == 'legen':
f_data = np.polynomial.legendre.legval(p_data - p_offset, coef)
if func == 'poly':
f_data = np.poly1d(coef)(p_data - p_offset)
if func == 'spline':
f_data = splev(p_data - p_offset, coef)
return f_data
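# Sketch of the detrending workflow used further below, on synthetic data: fit an
# abundance-vs-parameter trend with iterative sigma clipping, evaluate it over the
# full parameter range and subtract it to obtain "diff_"-style residuals. The slope
# and scatter values are invented for illustration; not called by the script.
def _example_abund_detrend():
    rand_gen = np.random.RandomState(0)
    teff = np.linspace(4500., 6500., 200)
    abund = 0.1 + 1e-4 * (teff - 5500.) + rand_gen.normal(0., 0.02, teff.size)
    fit_model, fit_std = fit_abund_trend(teff, abund, order=3, steps=2, func='poly',
                                         sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
    if fit_model is not None:
        abund_detrended = abund - eval_abund_trend(teff, fit_model, func='poly')
    else:
        abund_detrended = np.full_like(abund, np.nan)
    return abund_detrended, fit_std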
simulation_dir = '/shared/data-camelot/cotar/'
data_dir_clusters = simulation_dir+'GaiaDR2_open_clusters_2001_GALAH/'
data_dir = '/shared/ebla/cotar/'
USE_DR3 = True
Q_FLAGS = True
P_INDIVIDUAL = False
suffix = ''
if len(argv) > 1:
# parse input options
opts, args = getopt(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual='])
# set parameters, depending on user inputs
print(opts)
for o, a in opts:
if o == '--dr3':
USE_DR3 = int(a) > 0
if o == '--suffix':
suffix += str(a)
if o == '--flags':
Q_FLAGS = int(a) > 0
if o == '--individual':
P_INDIVIDUAL = int(a) > 0
CG_data = Table.read(data_dir+'clusters/Cantat-Gaudin_2018/members.fits')
tails_data = Table.read(data_dir+'clusters/cluster_tails/members_open_gaia_tails.fits')
# remove cluster members from tails data
print('Cluster members all:', len(CG_data), len(tails_data))
idx_not_in_cluster = np.in1d(tails_data['source_id'], CG_data['source_id'], invert=True)
tails_data = tails_data[idx_not_in_cluster]
print('Cluster members all:', len(CG_data), len(tails_data))
if USE_DR3:
# cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits')
cannon_data = Table.read(data_dir+'GALAH_iDR3_main_191213.fits')
fe_col = 'fe_h'
teff_col = 'teff'
q_flag = 'flag_sp'
suffix += '_DR3'
else:
pass
if Q_FLAGS:
suffix += '_flag0'
# determine all possible simulation subdirs
chdir(data_dir_clusters)
for cluster_dir in glob('Cluster_orbits_GaiaDR2_*'):
chdir(cluster_dir)
print('Working on clusters in ' + cluster_dir)
for sub_dir in glob('*'):
current_cluster = '_'.join(sub_dir.split('_')[0:2])
source_id_cg = CG_data[CG_data['cluster'] == current_cluster]['source_id']
source_id_tail = tails_data[tails_data['cluster'] == current_cluster]['source_id']
idx_cg_memb = np.in1d(cannon_data['source_id'], np.array(source_id_cg))
idx_tail = np.in1d(cannon_data['source_id'], np.array(source_id_tail))
if '.png' in sub_dir or 'individual-abund' in sub_dir:
continue
print(' ')
print(sub_dir)
chdir(sub_dir)
try:
g_init = Table.read('members_init_galah.csv', format='ascii', delimiter='\t')
idx_init = np.in1d(cannon_data['source_id'], g_init['source_id'])
except:
idx_init = np.full(len(cannon_data), False)
try:
g_in_all = Table.read('possible_ejected-step1.csv', format='ascii', delimiter='\t')
g_in = Table.read('possible_ejected-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_in_all = g_in_all[np.logical_and(g_in_all['time_in_cluster'] >= 1., # [Myr] longest time (of all incarnations) inside cluster
g_in_all['in_cluster_prob'] >= 68.)] # percentage of reincarnations inside cluster
g_in = g_in[np.logical_and(g_in['time_in_cluster'] >= 1.,
g_in['in_cluster_prob'] >= 68.)]
idx_in = np.in1d(cannon_data['source_id'], g_in['source_id'])
idx_in_no_CG = np.logical_and(idx_in,
np.logical_not(np.in1d(cannon_data['source_id'], CG_data['source_id'])))
except:
idx_in = np.full(len(cannon_data), False)
idx_in_no_CG = np.full(len(cannon_data), False)
try:
g_out = Table.read('possible_outside-step1_galah.csv', format='ascii', delimiter='\t')
# further refinement of results to be plotted here
g_out = g_out[np.logical_and(g_out['time_in_cluster'] <= 0,
g_out['in_cluster_prob'] <= 0)]
idx_out = np.in1d(cannon_data['source_id'], g_out['source_id'])
except:
idx_out = np.full(len(cannon_data), False)
chdir('..')
if np.sum(idx_init) == 0 or np.sum(idx_in) == 0 or np.sum(idx_out) == 0:
print(' Some Galah lists are missing')
if USE_DR3:
abund_cols = [c for c in cannon_data.colnames if '_fe' in c and 'nr_' not in c and 'diff_' not in c and 'e_' not in c and 'Li' not in c and 'alpha' not in c] # and ('I' in c or 'II' in c or 'III' in c)]
else:
abund_cols = [c for c in cannon_data.colnames if '_abund' in c and len(c.split('_')) == 3]
# abund_cols = ['e_' + cc for cc in abund_cols]
# rg = (0., 0.35)
# yt = [0., 0.1, 0.2, 0.3]
# medfix = '-snr-sigma_'
abund_cols = ['diff_' + cc for cc in abund_cols]
rg = (-0.45, 0.45)
yt = [-0.3, -0.15, 0.0, 0.15, 0.3]
medfix = '-detrended-snr_'
# ------------------------------------------------------------------------------
# NEW: plot with parameter dependency trends
# ------------------------------------------------------------------------------
bs = 40
x_cols_fig = 7
y_cols_fig = 5
param_lims = {'snr_c2_iraf': [5, 175], 'age': [0., 14.], 'teff': [3000, 7000], 'logg': [0.0, 5.5], 'fe_h': [-1.2, 0.5]}
for param in ['snr_c2_iraf']: #list(param_lims.keys()):
cannon_data['abund_det'] = 0
cannon_data['abund_det_elems'] = 0
print('Estimating membership using parameter', param)
fig, ax = plt.subplots(y_cols_fig, x_cols_fig, figsize=(15, 10))
for i_c, col in enumerate(abund_cols):
# print(col)
x_p = i_c % x_cols_fig
y_p = int(1. * i_c / x_cols_fig)
fit_x_param = 'teff'
cur_abund_col = '_'.join(col.split('_')[1:])
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col]
idx_val = np.isfinite(cannon_data[col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u4 = np.logical_and(idx_cg_memb, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
fit_model, col_std = fit_abund_trend(cannon_data[fit_x_param][idx_u2],
cannon_data[cur_abund_col][idx_u2],
order=3, steps=2, func='poly',
sigma_low=2.5, sigma_high=2.5, n_min_perc=10.)
if fit_model is not None:
cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] - eval_abund_trend(cannon_data[fit_x_param], fit_model, func='poly')
else:
cannon_data['diff_' + cur_abund_col] = np.nan
ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[col][idx_u1],
lw=0, s=3, color='C2', label='Field')
ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[col][idx_u2],
lw=0, s=3, color='C0', label='Initial')
ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[col][idx_u3],
lw=0, s=3, color='C1', label='Ejected')
if np.sum(idx_u5) > 0:
print('Ejected in tail:', np.sum(np.logical_and(idx_u3, idx_u5)))
ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[col][idx_u5],
lw=0, s=3, color='C4', label='Tail')
label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3))
ax[y_p, x_p].set(xlim=param_lims[param], title=' '.join(col.split('_')[:2]) + label_add,
ylim=rg,
yticks=yt,)
ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black')
rg = (-0.6, 0.6)
idx_val = np.isfinite(cannon_data[teff_col])
if Q_FLAGS:
idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0)
x_p = -1
y_p = -1
idx_u1 = np.logical_and(idx_out, idx_val)
idx_u2 = np.logical_and(idx_init, idx_val)
idx_u3 = np.logical_and(idx_in, idx_val)
idx_u5 = np.logical_and(idx_tail, idx_val)
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
# <NAME>
# <NAME>
# <NAME>
# <NAME>
# <NAME>
#
import time
import numpy as np
from pyscf import lib
from pyscf.lib import logger
from pyscf import ao2mo
from pyscf.cc import ccsd
from pyscf.cc import uccsd
from pyscf.cc import eom_rccsd
from pyscf.cc import addons
from pyscf.cc import uintermediates
########################################
# EOM-IP-CCSD
########################################
def vector_to_amplitudes_ip(vector, nmo, nocc):
'''For spin orbitals'''
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
sizes = (nocca, noccb, nocca*(nocca-1)//2*nvira, noccb*nocca*nvira,
nocca*noccb*nvirb, noccb*(noccb-1)//2*nvirb)
sections = np.cumsum(sizes[:-1])
r1a, r1b, r2a, r2baa, r2abb, r2b = np.split(vector, sections)
r2a = r2a.reshape(nocca*(nocca-1)//2,nvira)
r2b = r2b.reshape(noccb*(noccb-1)//2,nvirb)
r2baa = r2baa.reshape(noccb,nocca,nvira).copy()
r2abb = r2abb.reshape(nocca,noccb,nvirb).copy()
idxa = np.tril_indices(nocca, -1)
idxb = np.tril_indices(noccb, -1)
r2aaa = np.zeros((nocca,nocca,nvira), vector.dtype)
r2bbb = np.zeros((noccb,noccb,nvirb), vector.dtype)
r2aaa[idxa[0],idxa[1]] = r2a
r2aaa[idxa[1],idxa[0]] =-r2a
r2bbb[idxb[0],idxb[1]] = r2b
r2bbb[idxb[1],idxb[0]] =-r2b
r1 = (r1a.copy(), r1b.copy())
r2 = (r2aaa, r2baa, r2abb, r2bbb)
return r1, r2
def amplitudes_to_vector_ip(r1, r2):
'''For spin orbitals'''
r1a, r1b = r1
r2aaa, r2baa, r2abb, r2bbb = r2
nocca, noccb, nvirb = r2abb.shape
idxa = np.tril_indices(nocca, -1)
idxb = np.tril_indices(noccb, -1)
return np.hstack((r1a, r1b,
r2aaa[idxa].ravel(), r2baa.ravel(),
r2abb.ravel(), r2bbb[idxb].ravel()))
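# Consistency sketch (not part of the original module): packing spin-blocked IP
# amplitudes with amplitudes_to_vector_ip and unpacking them again with
# vector_to_amplitudes_ip should reproduce the input blocks, provided the
# same-spin blocks are antisymmetric in the occupied indices. The dimensions
# below are arbitrary small test values; the function is never called here.
def _check_ip_vector_roundtrip():
    nocca, noccb, nvira, nvirb = 3, 2, 4, 5
    rand_gen = np.random.RandomState(1)
    r1 = (rand_gen.rand(nocca), rand_gen.rand(noccb))
    r2aaa = rand_gen.rand(nocca, nocca, nvira)
    r2aaa = r2aaa - r2aaa.transpose(1, 0, 2)   # antisymmetrize occupied indices
    r2bbb = rand_gen.rand(noccb, noccb, nvirb)
    r2bbb = r2bbb - r2bbb.transpose(1, 0, 2)
    r2 = (r2aaa, rand_gen.rand(noccb, nocca, nvira),
          rand_gen.rand(nocca, noccb, nvirb), r2bbb)
    vector = amplitudes_to_vector_ip(r1, r2)
    r1_back, r2_back = vector_to_amplitudes_ip(vector, (nocca+nvira, noccb+nvirb),
                                               (nocca, noccb))
    assert all(abs(a - b).max() < 1e-12 for a, b in zip(r1, r1_back))
    assert all(abs(a - b).max() < 1e-12 for a, b in zip(r2, r2_back))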
def spatial2spin_ip(r1, r2, orbspin=None):
'''Convert R1/R2 of spatial orbital representation to R1/R2 of
spin-orbital representation
'''
r1a, r1b = r1
r2aaa, r2baa, r2abb, r2bbb = r2
nocc_a, nvir_a = r2aaa.shape[1:]
nocc_b, nvir_b = r2bbb.shape[1:]
if orbspin is None:
orbspin = np.zeros((nocc_a+nvir_a)*2, dtype=int)
orbspin[1::2] = 1
nocc = nocc_a + nocc_b
nvir = nvir_a + nvir_b
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
r1 = np.zeros((nocc), dtype=r1a.dtype)
r1[idxoa] = r1a
r1[idxob] = r1b
r2 = np.zeros((nocc**2, nvir), dtype=r2aaa.dtype)
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2aaa = r2aaa.reshape(nocc_a*nocc_a, nvir_a)
r2baa = r2baa.reshape(nocc_b*nocc_a, nvir_a)
r2abb = r2abb.reshape(nocc_a*nocc_b, nvir_b)
r2bbb = r2bbb.reshape(nocc_b*nocc_b, nvir_b)
lib.takebak_2d(r2, r2aaa, idxoaa.ravel(), idxva.ravel())
lib.takebak_2d(r2, r2baa, idxoba.ravel(), idxva.ravel())
lib.takebak_2d(r2, r2abb, idxoab.ravel(), idxvb.ravel())
lib.takebak_2d(r2, r2bbb, idxobb.ravel(), idxvb.ravel())
r2aba = -r2baa
r2bab = -r2abb
lib.takebak_2d(r2, r2aba, idxoab.T.ravel(), idxva.ravel())
lib.takebak_2d(r2, r2bab, idxoba.T.ravel(), idxvb.ravel())
return r1, r2.reshape(nocc, nocc, nvir)
def spin2spatial_ip(r1, r2, orbspin):
nocc, nvir = r2.shape[1:]
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
nocc_a = len(idxoa)
nocc_b = len(idxob)
nvir_a = len(idxva)
nvir_b = len(idxvb)
r1a = r1[idxoa]
r1b = r1[idxob]
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2 = r2.reshape(nocc**2, nvir)
r2aaa = lib.take_2d(r2, idxoaa.ravel(), idxva.ravel())
r2baa = lib.take_2d(r2, idxoba.ravel(), idxva.ravel())
r2abb = lib.take_2d(r2, idxoab.ravel(), idxvb.ravel())
r2bbb = lib.take_2d(r2, idxobb.ravel(), idxvb.ravel())
r2aaa = r2aaa.reshape(nocc_a, nocc_a, nvir_a)
r2baa = r2baa.reshape(nocc_b, nocc_a, nvir_a)
r2abb = r2abb.reshape(nocc_a, nocc_b, nvir_b)
r2bbb = r2bbb.reshape(nocc_b, nocc_b, nvir_b)
return [r1a, r1b], [r2aaa, r2baa, r2abb, r2bbb]
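# Round-trip sketch (illustrative only): expanding spatial blocks to the spin-orbital
# representation with spatial2spin_ip and sorting them back with spin2spatial_ip
# recovers the original blocks. An alternating alpha/beta orbspin with equal
# per-spin dimensions is assumed; the function is never called here.
def _check_ip_spin_spatial_roundtrip():
    nocc_s, nvir_s = 2, 3                      # per-spin occupied / virtual counts
    orbspin = np.zeros((nocc_s + nvir_s) * 2, dtype=int)
    orbspin[1::2] = 1
    rand_gen = np.random.RandomState(2)
    r1 = (rand_gen.rand(nocc_s), rand_gen.rand(nocc_s))
    r2 = (rand_gen.rand(nocc_s, nocc_s, nvir_s), rand_gen.rand(nocc_s, nocc_s, nvir_s),
          rand_gen.rand(nocc_s, nocc_s, nvir_s), rand_gen.rand(nocc_s, nocc_s, nvir_s))
    r1_spin, r2_spin = spatial2spin_ip(r1, r2, orbspin)
    r1_back, r2_back = spin2spatial_ip(r1_spin, r2_spin, orbspin)
    assert all(abs(a - b).max() < 1e-12 for a, b in zip(r1, r1_back))
    assert all(abs(a - b).max() < 1e-12 for a, b in zip(r2, r2_back))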
def ipccsd_matvec(eom, vector, imds=None, diag=None):
'''For spin orbitals
R2 operators of the form s_{ij}^{ b}, i.e. indices jb are coupled.'''
# Ref: <NAME>, and <NAME>. Chem. Phys. 136, 174102 (2012) Eqs.(8)-(9)
if imds is None: imds = eom.make_imds()
t1, t2, eris = imds.t1, imds.t2, imds.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_ip(vector, (nmoa,nmob), (nocca,noccb))
r1a, r1b = r1
r2aaa, r2baa, r2abb, r2bbb = r2
#Foo, Fov, and Wooov
Hr1a = np.einsum('me,mie->i', imds.Fov, r2aaa)
import isopy
import numpy as np
import pytest
# calculate_mass_fractionation_factor, remove_mass_fractionation, add_mass_fractionation
def test_mass_fractionation1():
# Testing with input as isotope array
# Using default reference values
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
unfractionated = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'], seed = 46)
unfractionated = unfractionated * fraction_ref
unfractionated['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated['105pd']
mf_factor = isopy.random(100, (0, 2), seed=47)
c_fractionated1 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor, '105pd')
c_fractionated2 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor)
assert c_fractionated1.keys == unfractionated.keys
assert c_fractionated1.size == unfractionated.size
assert c_fractionated2.keys == unfractionated.keys
assert c_fractionated2.size == unfractionated.size
c_unfractionated1 = isopy.tb.remove_mass_fractionation(c_fractionated1, mf_factor, '105pd')
c_unfractionated2 = isopy.tb.remove_mass_fractionation(c_fractionated2, mf_factor)
assert c_unfractionated1.keys == unfractionated.keys
assert c_unfractionated1.size == unfractionated.size
assert c_unfractionated2.keys == unfractionated.keys
assert c_unfractionated2.size == unfractionated.size
c_mf_factor2 = isopy.tb.calculate_mass_fractionation_factor(c_fractionated1, '108pd/105pd')
np.testing.assert_allclose(c_mf_factor2, mf_factor)
for key in unfractionated.keys:
mass_diff = mass_ref.get(key/'105pd')
fractionated = unfractionated[key] * (mass_diff ** mf_factor)
np.testing.assert_allclose(c_fractionated1[key], fractionated)
np.testing.assert_allclose(c_unfractionated1[key], unfractionated[key])
np.testing.assert_allclose(c_unfractionated2[key], unfractionated[key])
#Changing reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
unfractionated = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'],
seed=46)
unfractionated = unfractionated * fraction_ref
unfractionated['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated['105pd']
unfractionated2 = unfractionated.ratio('105pd')
mf_factor = isopy.random(100, (0, 2), seed=47)
c_fractionated1 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor, '105pd', isotope_masses=mass_ref)
c_fractionated2 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor, isotope_masses=mass_ref)
assert c_fractionated1.keys == unfractionated.keys
assert c_fractionated1.size == unfractionated.size
assert c_fractionated2.keys == unfractionated.keys
assert c_fractionated2.size == unfractionated.size
c_unfractionated1 = isopy.tb.remove_mass_fractionation(c_fractionated1, mf_factor, '105pd', isotope_masses=mass_ref)
c_unfractionated2 = isopy.tb.remove_mass_fractionation(c_fractionated2, mf_factor, isotope_masses=mass_ref)
assert c_unfractionated1.keys == unfractionated.keys
assert c_unfractionated1.size == unfractionated.size
assert c_unfractionated2.keys == unfractionated.keys
assert c_unfractionated2.size == unfractionated.size
c_mf_factor2 = isopy.tb.calculate_mass_fractionation_factor(c_fractionated1, '108pd/105pd',
isotope_masses=mass_ref, isotope_fractions=fraction_ref)
np.testing.assert_allclose(c_mf_factor2, mf_factor)
for key in unfractionated.keys:
mass_diff = mass_ref.get(key / '105pd')
fractionated = unfractionated[key] * (mass_diff ** mf_factor)
np.testing.assert_allclose(c_fractionated1[key], fractionated)
np.testing.assert_allclose(c_unfractionated1[key], unfractionated[key])
np.testing.assert_allclose(c_unfractionated2[key], unfractionated[key])
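# Worked illustration of the exponential mass-fractionation law that the tests above
# rely on (values are invented; masses are approximate): a fractionated ratio relates
# to the true ratio via r_meas = r_true * (m_i / m_ref)**beta, so beta can be
# recovered from a single ratio pair. Plain numpy only; not collected by pytest.
def _example_exponential_law():
    m_i, m_ref = 107.90389, 104.90508   # ~108Pd and ~105Pd atomic masses
    r_true, beta = 1.189, 1.6           # arbitrary illustrative numbers
    r_meas = r_true * (m_i / m_ref) ** beta
    beta_back = np.log(r_meas / r_true) / np.log(m_i / m_ref)
    assert np.isclose(beta_back, beta)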
# calculate_mass_fractionation_factor, remove_mass_fractionation, add_mass_fractionation
def test_mass_fractionation2():
# Testing with input as ratio array
# Using default reference values
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
unfractionated = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'],
seed=46)
unfractionated = unfractionated * fraction_ref
unfractionated['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated['105pd']
unfractionated = unfractionated.ratio('105pd')
mf_factor = isopy.random(100, (0, 2), seed=47)
c_fractionated2 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor)
assert c_fractionated2.keys == unfractionated.keys
assert c_fractionated2.size == unfractionated.size
c_unfractionated2 = isopy.tb.remove_mass_fractionation(c_fractionated2, mf_factor)
assert c_unfractionated2.keys == unfractionated.keys
assert c_unfractionated2.size == unfractionated.size
c_mf_factor2 = isopy.tb.calculate_mass_fractionation_factor(c_fractionated2, '108pd/105pd')
np.testing.assert_allclose(c_mf_factor2, mf_factor)
for key in unfractionated.keys:
mass_diff = mass_ref.get(key)
fractionated = unfractionated[key] * (mass_diff ** mf_factor)
np.testing.assert_allclose(c_fractionated2[key], fractionated)
np.testing.assert_allclose(c_unfractionated2[key], unfractionated[key])
# Changing reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
unfractionated = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'],
seed=46)
unfractionated = unfractionated * fraction_ref
unfractionated['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated['105pd']
unfractionated = unfractionated.ratio('105pd')
mf_factor = isopy.random(100, (0, 2), seed=47)
c_fractionated2 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor, isotope_masses=mass_ref)
assert c_fractionated2.keys == unfractionated.keys
assert c_fractionated2.size == unfractionated.size
c_unfractionated2 = isopy.tb.remove_mass_fractionation(c_fractionated2, mf_factor, isotope_masses=mass_ref)
assert c_unfractionated2.keys == unfractionated.keys
assert c_unfractionated2.size == unfractionated.size
c_mf_factor2 = isopy.tb.calculate_mass_fractionation_factor(c_fractionated2, '108pd/105pd',
isotope_masses=mass_ref, isotope_fractions=fraction_ref)
np.testing.assert_allclose(c_mf_factor2, mf_factor)
for key in unfractionated.keys:
mass_diff = mass_ref.get(key)
fractionated = unfractionated[key] * (mass_diff ** mf_factor)
np.testing.assert_allclose(c_fractionated2[key], fractionated)
np.testing.assert_allclose(c_unfractionated2[key], unfractionated[key])
class Test_MassIndependentCorrection:
def test_one(self):
# Default reference values
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
unfractionated1 = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'],
seed=46)
unfractionated1 = unfractionated1 * fraction_ref
unfractionated1['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated1['105pd']
unfractionated2 = unfractionated1.ratio('105pd')
n_unfractionated2 = (unfractionated2 / fraction_ref - 1) * 10000
mf_factor = isopy.random(100, (0, 2), seed=47)
fractionated1 = isopy.tb.add_mass_fractionation(unfractionated2, mf_factor)
fractionated2 = fractionated1.deratio(unfractionated1['105pd'])
self.run(fractionated1, unfractionated2, '108pd/105pd')
self.run(fractionated2, unfractionated2, '108pd/105pd')
self.run(fractionated1, n_unfractionated2, '108pd/105pd', factor=10_000)
self.run(fractionated2, n_unfractionated2, '108pd/105pd', factor=10_000)
self.run(fractionated1, n_unfractionated2, '108pd/105pd', factor='epsilon')
self.run(fractionated2, n_unfractionated2, '108pd/105pd', factor='epsilon')
# Different reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
unfractionated1 = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'],
seed=46)
unfractionated1 = unfractionated1 * fraction_ref
unfractionated1['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated1['105pd']
unfractionated2 = unfractionated1.ratio('105pd')
n_unfractionated2 = (unfractionated2 / fraction_ref - 1) * 10000
mf_factor = isopy.random(100, (0, 2), seed=47)
fractionated1 = isopy.tb.add_mass_fractionation(unfractionated2, mf_factor,
isotope_masses=mass_ref)
fractionated2 = fractionated1.deratio(unfractionated1['105pd'])
self.run(fractionated1, unfractionated2, '108pd/105pd', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated2, unfractionated2, '108pd/105pd', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated1, n_unfractionated2, '108pd/105pd', factor=10_000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated2, n_unfractionated2, '108pd/105pd', factor=10_000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated1, n_unfractionated2, '108pd/105pd', factor='epsilon', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated2, n_unfractionated2, '108pd/105pd', factor='epsilon', mass_ref=mass_ref, fraction_ref=fraction_ref)
def test_two(self):
# With interference correction
# We won't get an exact match here so we have to lower the tolerance.
# Default reference values
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
mf_factor = isopy.random(100, (0, 2), seed=47)
data = isopy.random(100, (1, 0.1), keys='101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd'.split(), seed=46)
data = data * fraction_ref
data['108pd'] = fraction_ref.get('108pd/105pd') * data['105pd']
fractionated = data.copy()
fractionated = isopy.tb.add_mass_fractionation(fractionated, mf_factor)
for key in fractionated.keys.filter(element_symbol='pd'):
if (ru:=fraction_ref.get(f'ru{key.mass_number}/ru101', 0)) > 0:
ru *= fractionated['101ru'] * (mass_ref.get(f'ru{key.mass_number}/ru101', 0) ** mf_factor)
fractionated[key] += ru
if (cd:=fraction_ref.get(f'cd{key.mass_number}/cd111', 0)) > 0:
cd *= fractionated['111cd'] * (mass_ref.get(f'cd{key.mass_number}/cd111', 0) ** mf_factor)
fractionated[key] += cd
correct1 = data.copy(element_symbol = 'pd').ratio('105pd')
correct2 = (correct1 / fraction_ref - 1)
correct3 = (correct1 / fraction_ref - 1) * 10_000
self.run(fractionated, correct1, '108pd/105pd')
self.run(fractionated, correct2, '108pd/105pd', factor=1)
self.run(fractionated, correct3, '108pd/105pd', factor=10_000)
self.run(fractionated, correct3, '108pd/105pd', factor='epsilon')
# Different reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
mf_factor = isopy.random(100, (0, 2), seed=47)
data = isopy.random(100, (1, 0.1), keys='101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd'.split(), seed=46)
data = data * fraction_ref
data['108pd'] = fraction_ref.get('108pd/105pd') * data['105pd']
fractionated = data.copy()
fractionated = isopy.tb.add_mass_fractionation(fractionated, mf_factor, isotope_masses=mass_ref)
for key in fractionated.keys.filter(element_symbol='pd'):
if (ru := fraction_ref.get(f'ru{key.mass_number}/ru101', 0)) > 0:
ru *= fractionated['101ru'] * (
mass_ref.get(f'ru{key.mass_number}/ru101', 0) ** mf_factor)
fractionated[key] += ru
if (cd := fraction_ref.get(f'cd{key.mass_number}/cd111', 0)) > 0:
cd *= fractionated['111cd'] * (
mass_ref.get(f'cd{key.mass_number}/cd111', 0) ** mf_factor)
fractionated[key] += cd
correct1 = data.copy(element_symbol='pd').ratio('105pd')
correct2 = (correct1 / fraction_ref - 1)
correct3 = (correct1 / fraction_ref - 1) * 10_000
self.run(fractionated, correct1, '108pd/105pd', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct2, '108pd/105pd', factor=1, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct3, '108pd/105pd', factor=10_000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct3, '108pd/105pd', factor='epsilon', mass_ref=mass_ref, fraction_ref=fraction_ref)
def test_three(self):
# Normalisations
# Default reference values
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
mf_factor = isopy.random(100, (0, 2), seed=47)
data = isopy.random(100, (1, 0.1), keys='102pd 104pd 105pd 106pd 108pd 110pd'.split(), seed=46)
data = data * fraction_ref
data['108pd'] = fraction_ref.get('108pd/105pd') * data['105pd']
fractionated = data.copy()
fractionated = isopy.tb.add_mass_fractionation(fractionated, mf_factor)
correct1 = data.copy(element_symbol='pd').ratio('105pd')
correct2 = (correct1 / fraction_ref - 1)
correct3 = correct2 * 1000
correct4 = correct2 * 10_000
correct5 = correct2 * 1_000_000
self.run(fractionated, correct1, '108pd/105pd')
self.run(fractionated, correct2, '108pd/105pd', factor=1)
self.run(fractionated, correct3, '108pd/105pd', factor=1000)
self.run(fractionated, correct3, '108pd/105pd', factor='ppt')
self.run(fractionated, correct3, '108pd/105pd', factor='permil')
self.run(fractionated, correct4, '108pd/105pd', factor=10_000)
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon')
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000)
self.run(fractionated, correct5, '108pd/105pd', factor='mu')
self.run(fractionated, correct5, '108pd/105pd', factor='ppm')
# Single value
std1 = isopy.random(100, (1, 0.1), keys='102pd 104pd 105pd 106pd 108pd 110pd'.split(), seed=48)
std1 = std1 * fraction_ref
rstd1 = std1.ratio('pd105')
correct1 = data.copy(element_symbol='pd').ratio('105pd')
correct2 = (correct1 / np.mean(rstd1) - 1)
correct3 = correct2 * 1000
correct4 = correct2 * 10_000
correct5 = correct2 * 1_000_000
self.run(fractionated, correct2, '108pd/105pd', norm_val=rstd1)
self.run(fractionated, correct2, '108pd/105pd', factor=1, norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor=1000, norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor='permil', norm_val=rstd1)
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, norm_val=rstd1)
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor='mu', norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', norm_val=rstd1)
std1 = np.mean(std1)
rstd1 = np.mean(rstd1)
self.run(fractionated, correct2, '108pd/105pd', norm_val=rstd1)
self.run(fractionated, correct2, '108pd/105pd', factor=1, norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor=1000, norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor='permil', norm_val=rstd1)
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, norm_val=rstd1)
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor='mu', norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', norm_val=rstd1)
# Multiple
std1 = isopy.random(100, (1, 0.1), keys='102pd 104pd 105pd 106pd 108pd 110pd'.split(),
seed=48)
std1 = std1 * fraction_ref
rstd1 = std1.ratio('pd105')
std2 = isopy.random(50, (1, 0.1), keys='102pd 104pd 105pd 106pd 108pd 110pd'.split(),
seed=49)
std2 = std2 * fraction_ref
rstd2 = std2.ratio('pd105')
correct1 = data.copy(element_symbol='pd').ratio('105pd')
correct2 = (correct1 / (np.mean(rstd1)/2 + np.mean(rstd2)/2) - 1)
correct3 = correct2 * 1000
correct4 = correct2 * 10_000
correct5 = correct2 * 1_000_000
self.run(fractionated, correct2, '108pd/105pd', norm_val=(rstd1, rstd2))
self.run(fractionated, correct2, '108pd/105pd', factor=1, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor=1000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='permil', norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='mu', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', norm_val=(rstd1, rstd2))
std1 = np.mean(std1)
rstd1 = np.mean(rstd1)
self.run(fractionated, correct2, '108pd/105pd', norm_val=(rstd1, rstd2))
self.run(fractionated, correct2, '108pd/105pd', factor=1, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor=1000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='permil', norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='mu', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', norm_val=(rstd1, rstd2))
std2 = np.mean(std2)
rstd2 = np.mean(rstd2)
self.run(fractionated, correct2, '108pd/105pd', norm_val=(rstd1, rstd2))
self.run(fractionated, correct2, '108pd/105pd', factor=1, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor=1000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='permil', norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='mu', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', norm_val=(rstd1, rstd2))
# Different reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
mf_factor = isopy.random(100, (0, 2), seed=47)
data = isopy.random(100, (1, 0.1), keys='102pd 104pd 105pd 106pd 108pd 110pd'.split(),
seed=46)
data = data * fraction_ref
data['108pd'] = fraction_ref.get('108pd/105pd') * data['105pd']
fractionated = data.copy()
fractionated = isopy.tb.add_mass_fractionation(fractionated, mf_factor, isotope_masses=mass_ref)
correct1 = data.copy(element_symbol='pd').ratio('105pd')
correct2 = (correct1 / fraction_ref - 1)
correct3 = correct2 * 1000
correct4 = correct2 * 10_000
correct5 = correct2 * 1_000_000
self.run(fractionated, correct1, '108pd/105pd', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct2, '108pd/105pd', factor=1, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct3, '108pd/105pd', factor=1000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct3, '108pd/105pd', factor='permil', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct5, '108pd/105pd', factor='mu', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', mass_ref=mass_ref, fraction_ref=fraction_ref)
def run(self, data, correct, mb_ratio, factor = None, mass_ref = None, fraction_ref=None, norm_val = None):
if type(factor) is str:
func = getattr(isopy.tb.internal_normalisation, factor)
factor2 = None
else:
factor2 = factor
func = isopy.tb.internal_normalisation
kwargs = {}
if factor2 is not None: kwargs['extnorm_factor'] = factor2
if mass_ref is not None: kwargs['isotope_masses'] = mass_ref
if fraction_ref is not None: kwargs['isotope_fractions'] = fraction_ref
if norm_val is not None: kwargs['extnorm_value'] = norm_val
corrected = func(data, mb_ratio, **kwargs)
assert corrected.keys == correct.keys - mb_ratio
assert corrected.size == correct.size
assert corrected.ndim == correct.ndim
for key in corrected.keys:
np.testing.assert_allclose(corrected[key], correct[key])
# mass independent correction
if type(factor) is str:
func = getattr(isopy.tb.mass_independent_correction, factor)
factor2 = None
else:
factor2 = factor
func = isopy.tb.mass_independent_correction
kwargs = {}
if factor2 is not None: kwargs['normalisation_factor'] = factor2
if mass_ref is not None: kwargs['isotope_masses'] = mass_ref
if fraction_ref is not None: kwargs['isotope_fractions'] = fraction_ref
if norm_val is not None: kwargs['normalisation_value'] = norm_val
corrected = func(data, mb_ratio, **kwargs)
assert corrected.keys == correct.keys - mb_ratio
assert corrected.size == correct.size
assert corrected.ndim == correct.ndim
for key in corrected.keys:
np.testing.assert_allclose(corrected[key], correct[key])
class Test_IsobaricInterferences:
def test_one(self):
# No mass fractionation factor
# Single interference isotope
# Default reference values
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
base_data = isopy.random(100, (1, 0.01), keys='101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd'.split())
base_data = base_data * fraction_ref
data = base_data.copy()
for key in data.keys.filter(element_symbol='pd'):
data[key] += fraction_ref.get(f'ru{key.mass_number}/ru101', 0) * data['101ru']
data[key] += fraction_ref.get(f'cd{key.mass_number}/cd111', 0) * data['111cd']
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '111cd'] = 0
interferences2 = {'ru': ('104pd',), 'cd': ('106pd', '108pd')}
correct2 = base_data.copy()
correct2['101ru', '111cd'] = 0
correct2['102pd'] = data['102pd']
correct2['110pd'] = data['110pd']
self.run(data, data, correct1, correct2, interferences1, interferences2, '105pd')
# Different reference values
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
base_data = isopy.random(100, (1, 0.01), keys='101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd'.split())
base_data = base_data * fraction_ref
data = base_data.copy()
for key in data.keys.filter(element_symbol='pd'):
data[key] += fraction_ref.get(f'ru{key.mass_number}/ru101', 0) * data['101ru']
data[key] += fraction_ref.get(f'cd{key.mass_number}/cd111', 0) * data['111cd']
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '111cd'] = 0
interferences2 = {'ru': ('104pd',), 'cd': ('106pd', '108pd')}
correct2 = base_data.copy()
correct2['101ru', '111cd'] = 0
correct2['102pd'] = data['102pd']
correct2['110pd'] = data['110pd']
self.run(data, data, correct1, correct2, interferences1, interferences2, '105pd',
fraction_ref=fraction_ref)
def test_two(self):
# No mass fractionation factor
# Multiple interference isotopes
# Default reference values
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
base_data = isopy.random(100, (1, 0.01), keys='99ru 101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd 112cd'.split())
# 112cd > 111cd, 101ru > 99ru
base_data = base_data * fraction_ref
data1 = base_data.copy()
data1['99ru', '111cd'] = -1 # so that we don't accidentally make this the largest isotope
for key in data1.keys.filter(key_neq = '<KEY>'.split()):
data1[key] += fraction_ref.get(f'ru{key.mass_number}/ru101', 0) * data1['101ru']
data1[key] += fraction_ref.get(f'cd{key.mass_number}/cd112', 0) * data1['112cd']
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '112cd'] = 0
correct1['99ru', '111cd'] = -1
interferences2 = {'ru99': ('104pd',), 'cd111': ('106pd', '108pd')}
data2 = base_data.copy()
data2['ru101', 'cd112'] = -1 # so that we don't accidentally make this the largest isotope
for key in data2.keys.filter(key_neq='ru99 cd111 102pd 110pd'.split()):
data2[key] += fraction_ref.get(f'ru{key.mass_number}/ru99', 0) * data2['99ru']
data2[key] += fraction_ref.get(f'cd{key.mass_number}/cd111', 0) * data2['111cd']
correct2 = base_data.copy()
correct2['99ru', '111cd'] = 0
correct2['101ru', '112cd'] = -1
self.run(data1, data2, correct1, correct2, interferences1, interferences2, '105pd')
# Different reference values
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
base_data = isopy.random(100, (1, 0.01),
keys='99ru 101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd 112cd'.split())
# 112cd > 111cd, 101ru > 99ru
base_data = base_data * fraction_ref
data1 = base_data.copy()
data1['99ru', '111cd'] = -1 # so that we don't accidentally make this the largest isotope
for key in data1.keys.filter(key_neq='<KEY>'.split()):
data1[key] += fraction_ref.get(f'ru{key.mass_number}/ru101', 0) * data1['101ru']
data1[key] += fraction_ref.get(f'cd{key.mass_number}/cd112', 0) * data1['112cd']
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '112cd'] = 0
correct1['99ru', '111cd'] = -1
interferences2 = {'ru99': ('104pd',), 'cd111': ('106pd', '108pd')}
data2 = base_data.copy()
data2['ru101', 'cd112'] = -1 # so that we don't accidentally make this the largest isotope
for key in data2.keys.filter(key_neq='<KEY>'.split()):
data2[key] += fraction_ref.get(f'ru{key.mass_number}/ru99', 0) * data2['99ru']
data2[key] += fraction_ref.get(f'cd{key.mass_number}/cd111', 0) * data2['111cd']
correct2 = base_data.copy()
correct2['99ru', '111cd'] = 0
correct2['101ru', '112cd'] = -1
self.run(data1, data2, correct1, correct2, interferences1, interferences2, '105pd',
fraction_ref=fraction_ref)
def test_three(self):
#Mass fractionation
#Single interference isotope
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
base_data = isopy.random(100, (1, 0.01),
keys='<KEY>'.split())
mf_factor = isopy.random(100, (0,2))
base_data = base_data * fraction_ref
data = base_data.copy()
for key in data.keys.filter(element_symbol='pd'):
if (ru:=fraction_ref.get(f'ru{key.mass_number}/ru101', 0)) > 0:
ru *= data['101ru'] * (mass_ref.get(f'ru{key.mass_number}/ru101', 0) ** mf_factor)
data[key] += ru
if (cd:=fraction_ref.get(f'cd{key.mass_number}/cd111', 0)) > 0:
cd *= data['111cd'] * (mass_ref.get(f'cd{key.mass_number}/cd111', 0) ** mf_factor)
data[key] += cd
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '111cd'] = 0
interferences2 = {'ru': ('104pd',), 'cd': ('106pd', '108pd')}
correct2 = base_data.copy()
correct2['101ru', '111cd'] = 0
correct2['102pd'] = data['102pd']
correct2['110pd'] = data['110pd']
self.run(data, data, correct1, correct2, interferences1, interferences2, '105pd',
mf_factor=mf_factor)
# Multiple interference isotopes
# Different reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
base_data = isopy.random(100, (1, 0.01),
keys='99ru 101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd 112cd'.split())
# 112cd > 111cd, 101ru > 99ru
base_data = base_data * fraction_ref
data1 = base_data.copy()
data1['99ru', '111cd'] = -1 # so that we don't accidentally make this the largest isotope
for key in data1.keys.filter(key_neq='<KEY>'.split()):
if (ru:=fraction_ref.get(f'ru{key.mass_number}/ru101', 0)) > 0:
ru *= data1['101ru'] * (mass_ref.get(f'ru{key.mass_number}/ru101', 0) ** mf_factor)
data1[key] += ru
if (cd:=fraction_ref.get(f'cd{key.mass_number}/cd112', 0)) > 0:
cd *= data1['cd112'] * (mass_ref.get(f'cd{key.mass_number}/cd112', 0) ** mf_factor)
data1[key] += cd
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '112cd'] = 0
correct1['99ru', '111cd'] = -1
interferences2 = {'ru99': ('104pd',), 'cd111': ('106pd', '108pd')}
data2 = base_data.copy()
data2['ru101', 'cd112'] = -1 # so that we don't accidentally make this the largest isotope
for key in data2.keys.filter(key_neq='ru99 cd111 102pd 110pd'.split()):
if (ru:=fraction_ref.get(f'ru{key.mass_number}/ru99', 0)) > 0:
ru *= data2['ru99'] * (mass_ref.get(f'ru{key.mass_number}/ru99', 0) ** mf_factor)
data2[key] += ru
if (cd:=fraction_ref.get(f'cd{key.mass_number}/cd111', 0)) > 0:
cd *= data2['111cd'] * (mass_ref.get(f'cd{key.mass_number}/cd111', 0) ** mf_factor)
data2[key] += cd
correct2 = base_data.copy()
correct2['99ru', '111cd'] = 0
correct2['101ru', '112cd'] = -1
self.run(data1, data2, correct1, correct2, interferences1, interferences2, '105pd',
fraction_ref=fraction_ref, mass_ref=mass_ref, mf_factor=mf_factor)
def run(self, data1, data2, correct1, correct2, interferences1, interferences2, denom=None,
mf_factor=None, fraction_ref=None, mass_ref=None):
interferences = isopy.tb.find_isobaric_interferences('pd', data1)
assert len(interferences) == len(interferences1)
for key in interferences1:
assert key in interferences
assert interferences[key] == interferences1[key]
corrected1 = isopy.tb.remove_isobaric_interferences(data1, interferences,
mf_factor=mf_factor,
isotope_fractions=fraction_ref,
isotope_masses=mass_ref)
assert corrected1.keys == correct1.keys
assert corrected1.size == correct1.size
for key in corrected1.keys:
np.testing.assert_allclose(corrected1[key], correct1[key])
corrected2 = isopy.tb.remove_isobaric_interferences(data2, interferences2,
mf_factor=mf_factor,
isotope_fractions=fraction_ref,
isotope_masses=mass_ref)
assert corrected2.keys == correct2.keys
assert corrected2.size == correct2.size
for key in corrected2.keys:
np.testing.assert_allclose(corrected2[key], correct2[key])
#Ratio test data
if denom is not None:
data1 = data1.ratio(denom)
data2 = data2.ratio(denom)
correct1 = correct1.ratio(denom)
correct2 = correct2.ratio(denom)
interferences = isopy.tb.find_isobaric_interferences('pd', data1)
assert len(interferences) == len(interferences1)
for key in interferences1:
assert key in interferences
assert interferences[key] == interferences1[key]
corrected1 = isopy.tb.remove_isobaric_interferences(data1, interferences,
mf_factor=mf_factor,
isotope_fractions=fraction_ref,
isotope_masses=mass_ref)
assert corrected1.keys == correct1.keys
assert corrected1.size == correct1.size
for key in corrected1.keys:
np.testing.assert_allclose(corrected1[key], correct1[key])
corrected2 = isopy.tb.remove_isobaric_interferences(data2, interferences2,
mf_factor=mf_factor,
isotope_fractions=fraction_ref,
isotope_masses=mass_ref)
assert corrected2.keys == correct2.keys
assert corrected2.size == correct2.size
for key in corrected2.keys:
np.testing.assert_allclose(corrected2[key], correct2[key])
def test_find(self):
interferences = isopy.tb.find_isobaric_interferences('pd', ('ru', 'cd'))
assert len(interferences) == 2
assert 'ru' in interferences
assert interferences['ru'] == ('102Pd', '104Pd')
assert 'cd' in interferences
assert interferences['cd'] == ('106Pd', '108Pd', '110Pd')
interferences = isopy.tb.find_isobaric_interferences('pd', ('ru', 'rh', 'ag', 'cd'))
assert len(interferences) == 2
assert 'ru' in interferences
assert interferences['ru'] == ('102Pd', '104Pd')
assert 'cd' in interferences
assert interferences['cd'] == ('106Pd', '108Pd', '110Pd')
interferences = isopy.tb.find_isobaric_interferences('ce')
assert len(interferences) == 4
assert 'xe' in interferences
assert interferences['xe'] == ('136Ce',)
assert 'ba' in interferences
assert interferences['ba'] == ('136Ce', '138Ce')
assert 'la' in interferences
assert interferences['la'] == ('138Ce', )
assert 'nd' in interferences
assert interferences['nd'] == ('142Ce',)
interferences = isopy.tb.find_isobaric_interferences('138ce')
assert len(interferences) == 2
assert 'ba' in interferences
assert interferences['ba'] == ('138Ce',)
assert 'la' in interferences
assert interferences['la'] == ('138Ce',)
interferences = isopy.tb.find_isobaric_interferences('zn', ('ni', 'ge', 'ba++'))
assert len(interferences) == 3
assert 'ni' in interferences
assert interferences['ni'] == ('64Zn',)
assert 'ge' in interferences
assert interferences['ge'] == ('70Zn',)
assert 'ba++' in interferences
assert interferences['ba++'] == ('66Zn', '67Zn', '68Zn')
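# Hedged sketch of the isobaric-interference arithmetic exercised by the class above,
# in plain numbers (all values invented or approximate; the real correction is done by
# isopy.tb.remove_isobaric_interferences): the beam measured at mass 102 contains true
# 102Pd plus 102Ru, estimated from the interference-free 101Ru monitor scaled by the
# 102Ru/101Ru abundance ratio. Not collected by pytest.
def _example_interference_subtraction():
    measured_102 = 0.0153        # total signal at mass 102
    monitor_101ru = 0.0021       # interference-free Ru monitor signal
    r_102ru_101ru = 1.85         # approximate 102Ru/101Ru abundance ratio
    corrected_102pd = measured_102 - r_102ru_101ru * monitor_101ru
    return corrected_102pd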
class Test_rDelta():
def test_rDelta1(self):
# Data is a single value
data = isopy.refval.isotope.fraction.to_array(element_symbol='pd')
# Dict
reference = isopy.refval.isotope.fraction
correct1 = isopy.zeros(None, data.keys)
correct2 = isopy.ones(None, data.keys)
self.run(data, data, reference, correct1, correct2)
# Single array
reference = isopy.random(100, keys=data.keys)
correct1 = data / np.mean(reference) - 1
correct2 = data / np.mean(reference)
self.run(data, data, reference, correct1, correct2)
self.run(data, data, np.mean(reference), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data, reference, correct1, correct2, 10_000)
self.run(data, data, np.mean(reference), correct1, correct2, 10_000)
# Multiple values
reference1 = isopy.random(100, keys=data.keys)
reference2 = isopy.random(100, keys=data.keys)
meanmean = np.mean(reference1)/2 + np.mean(reference2)/2
correct1 = data / meanmean - 1
correct2 = data / meanmean
self.run(data, data, (reference1, reference2), correct1, correct2)
self.run(data, data, (np.mean(reference1), reference2), correct1, correct2)
self.run(data, data, (np.mean(reference1), np.mean(reference2)), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data, (reference1, reference2), correct1, correct2, 10_000)
self.run(data, data, (np.mean(reference1), reference2), correct1, correct2, 10_000)
self.run(data, data, (np.mean(reference1), np.mean(reference2)), correct1, correct2, 10_000)
# Keys that do not match
data2 = data.copy()
data2['105pd', '106pd'] = np.nan
reference1 = isopy.random(100, keys='101ru 102pd 104pd 105pd 108pd 110pd 111cd'.split())
reference2 = isopy.random(100, keys='101ru 102pd 104pd 106pd 108pd 110pd 111cd'.split())
meanmean = np.mean(reference1) / 2 + np.mean(reference2) / 2
correct1 = data / meanmean - 1
correct2 = data / meanmean
self.run(data, data2, (reference1, reference2), correct1, correct2)
self.run(data, data2, (np.mean(reference1), reference2), correct1, correct2)
self.run(data, data2, (np.mean(reference1), np.mean(reference2)), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data2, (reference1, reference2), correct1, correct2, 10_000)
self.run(data, data2, (np.mean(reference1), reference2), correct1, correct2, 10_000)
self.run(data, data2, (np.mean(reference1), np.mean(reference2)), correct1, correct2, 10_000)
def test_rDelta2(self):
data = isopy.random(100, keys=isopy.refval.element.isotopes['pd'])
data = data * isopy.refval.isotope.fraction
# Dict
reference = isopy.refval.isotope.fraction
correct1 = data / reference - 1
correct2 = data / reference
self.run(data, data, reference, correct1, correct2)
# Single array
reference = isopy.random(100, keys=data.keys)
correct1 = data / np.mean(reference) - 1
correct2 = data / np.mean(reference)
self.run(data, data, reference, correct1, correct2)
self.run(data, data, np.mean(reference), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data, reference, correct1, correct2, 10_000)
self.run(data, data, np.mean(reference), correct1, correct2, 10_000)
# Multiple values
reference1 = isopy.random(100, keys=data.keys)
reference2 = isopy.random(100, keys=data.keys)
meanmean = np.mean(reference1)/2 + np.mean(reference2)/2
correct1 = data / meanmean - 1
correct2 = data / meanmean
self.run(data, data, (reference1, reference2), correct1, correct2)
self.run(data, data, (np.mean(reference1), reference2), correct1, correct2)
self.run(data, data, (np.mean(reference1), np.mean(reference2)), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data, (reference1, reference2), correct1, correct2, 10_000)
self.run(data, data, (np.mean(reference1), reference2), correct1, correct2, 10_000)
self.run(data, data, (np.mean(reference1), np.mean(reference2)), correct1, correct2, 10_000)
# Keys that do not match
data2 = data.copy()
data2['105pd', '106pd'] = np.nan
reference1 = isopy.random(100, keys='101ru 102pd 104pd 105pd 108pd 110pd 111cd'.split())
reference2 = isopy.random(100, keys='101ru 102pd 104pd 106pd 108pd 110pd 111cd'.split())
meanmean = np.mean(reference1) / 2 + np.mean(reference2) / 2
correct1 = data / meanmean - 1
correct2 = data / meanmean
self.run(data, data2, (reference1, reference2), correct1, correct2)
self.run(data, data2, (np.mean(reference1), reference2), correct1, correct2)
self.run(data, data2, (np.mean(reference1), np.mean(reference2)), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data2, (reference1, reference2), correct1, correct2, 10_000)
self.run(data, data2, (np.mean(reference1), reference2), correct1, correct2, 10_000)
self.run(data, data2, (np.mean(reference1), np.mean(reference2)), correct1, correct2, 10_000)
def test_presets(self):
data = isopy.random(100, keys=isopy.refval.element.isotopes['pd'])
data = data * isopy.refval.isotope.fraction
reference = isopy.refval.isotope.fraction
correct = (data / reference - 1) * 1000
normalised = isopy.tb.rDelta.ppt(data, reference)
denormalised = isopy.tb.inverse_rDelta.ppt(normalised, reference)
self.compare(correct, normalised)
self.compare(data, denormalised)
correct = (data / reference - 1) * 1000
normalised = isopy.tb.rDelta.permil(data, reference)
denormalised = isopy.tb.inverse_rDelta.permil(normalised, reference)
self.compare(correct, normalised)
self.compare(data, denormalised)
correct = (data / reference - 1) * 10_000
normalised = isopy.tb.rDelta.epsilon(data, reference)
denormalised = isopy.tb.inverse_rDelta.epsilon(normalised, reference)
self.compare(correct, normalised)
self.compare(data, denormalised)
correct = (data / reference - 1) * 1_000_000
normalised = isopy.tb.rDelta.mu(data, reference)
denormalised = isopy.tb.inverse_rDelta.mu(normalised, reference)
self.compare(correct, normalised)
self.compare(data, denormalised)
correct = (data / reference - 1) * 1_000_000
normalised = isopy.tb.rDelta.ppm(data, reference)
denormalised = isopy.tb.inverse_rDelta.ppm(normalised, reference)
self.compare(correct, normalised)
self.compare(data, denormalised)
def run(self, data1, data2, reference_value, correct1, correct2, factor=1):
normalised = isopy.tb.rDelta(data1, reference_value, factor=factor)
assert normalised.keys == data1.keys
assert normalised.size == data1.size
assert normalised.ndim == data1.ndim
for key in normalised.keys:
np.testing.assert_allclose(normalised[key], correct1[key])
denormalised = isopy.tb.inverse_rDelta(normalised, reference_value, factor=factor)
assert denormalised.keys == data1.keys
assert denormalised.size == data1.size
assert denormalised.ndim == data1.ndim
for key in denormalised.keys:
np.testing.assert_allclose(denormalised[key], data2[key])
normalised = isopy.tb.rDelta(data1, reference_value, factor=factor, deviations=0)
assert normalised.keys == data1.keys
assert normalised.size == data1.size
assert normalised.ndim == data1.ndim
for key in normalised.keys:
| np.testing.assert_allclose(normalised[key], correct2[key]) | numpy.testing.assert_allclose |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["TimeSeries"]
import numpy as np
from itertools import izip
from .frame import Frame
from ._kpsf import solve, N_INT_TIME
class TimeSeries(object):
def __init__(self, time, flux_images, ferr_images, quality, **kwargs):
# Initialize the frame images.
self.time = time
self.frames = []
for i, (f, fe) in enumerate(izip(flux_images, ferr_images)):
frame = []
if quality[i] == 0:
frame = Frame(f, fe, **kwargs)
if not np.any(frame.mask):
frame = []
self.frames.append(frame)
# Save the frame shape.
self.shape = self.frames[0].shape
if any(f.shape != self.shape for f in self.frames if len(f)):
raise ValueError("Frame shape mismatch")
# Update the frames to have a coherent time series.
self.initialize()
def initialize(self):
# Traverse the graph and construct the (greedy) best path.
ns = min(map(len, filter(len, self.frames)))
metric = np.array([1.0, 1.0, 1e-8])
current = None
for t, node in enumerate(self.frames):
if not len(node):
continue
if current is None:
current = node.coords[:ns]
node.coords = current
continue
# Compute the set of distances between this node and the current
# position.
r = sum([(node.coords[k][:, None] - current[k][None, :]) ** 2
* metric[i] for i, k in enumerate(("x", "y", "flux"))])
r0 = np.array(r)
# Loop over the permutations and greedily choose the best update.
rows, cols = np.arange(r.shape[0]), np.arange(r.shape[1])
update = np.nan + | np.zeros(ns) | numpy.zeros |
# -*- coding: utf-8 -*-
import os
import argparse
import random
import numpy as np
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.autograd import Variable
from models.resnet12_2 import resnet12
from models.meta_part_inference_mini import ProtoComNet
from models.PredTrainHead import LinearClassifier, LinearRotateHead
from utils import set_gpu, Timer, count_accuracy, check_dir, log
import pickle
def one_hot(indices, depth):
"""
Returns a one-hot tensor.
This is a PyTorch equivalent of Tensorflow's tf.one_hot.
Parameters:
indices: a (n_batch, m) Tensor or (m) Tensor.
depth: a scalar. Represents the depth of the one hot dimension.
Returns: a (n_batch, m, depth) Tensor or (m, depth) Tensor.
"""
    encoded_indices = torch.zeros(indices.size() + torch.Size([depth])).cuda()
    index = indices.view(indices.size() + torch.Size([1]))
    encoded_indices = encoded_indices.scatter_(1, index, 1)
    return encoded_indices
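# Hedged usage sketch (added for illustration, never called by the training code):
# it only demonstrates the shape contract of one_hot() for a 1-D label tensor and
# assumes a CUDA device is available, since one_hot() allocates on the GPU.
def _one_hot_usage_example():
    labels = torch.tensor([0, 3, 1]).cuda()   # three samples with class ids in [0, 5)
    encoded = one_hot(labels, depth=5)        # -> FloatTensor of shape (3, 5)
    return encoded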
def get_model(options):
# Choose the embedding network
if options.network == 'ResNet':
network = resnet12().cuda()
network = torch.nn.DataParallel(network)
fea_dim = 512
else:
print ("Cannot recognize the network type")
assert(False)
propa_head = ProtoComNet(opt=options, in_dim=fea_dim).cuda()
# Choose the classification head
    if options.use_trainval == 'True':
n_classes=80
else:
n_classes=64
if options.pre_head == 'LinearNet':
pre_head = LinearClassifier(in_dim=fea_dim, n_classes=n_classes).cuda()
elif options.pre_head == 'LinearRotateNet':
pre_head = LinearRotateHead(in_dim=fea_dim, n_classes=n_classes).cuda()
else:
print("Cannot recognize the dataset type")
assert (False)
if options.phase == 'pretrain':
from models.classification_heads_orgin import ClassificationHead
else:
from models.classification_heads import ClassificationHead
# Choose the classification head
if options.head == 'CosineNet':
cls_head = ClassificationHead(base_learner='CosineNet').cuda()
elif options.head == 'FuseCosNet':
cls_head = ClassificationHead(base_learner='FuseCos').cuda()
else:
print ("Cannot recognize the dataset type")
assert(False)
return (network, propa_head, pre_head, cls_head)
def get_dataset(options):
# Choose the embedding network
if options.dataset == 'miniImageNet':
from data.mini_imagenet import MiniImageNet, FewShotDataloader, MiniImageNetPC
# dataset_trainval = MiniImageNet(phase='trainval')
if options.phase == 'savepart':
dataset_train = MiniImageNet(phase='train', do_not_use_random_transf=True)
elif options.phase == 'metainfer':
dataset_train = MiniImageNetPC(phase='train', shot=options.train_shot)
else:
dataset_train = MiniImageNet(phase='train')
dataset_val = MiniImageNet(phase='val')
dataset_test = MiniImageNet(phase='test')
data_loader = FewShotDataloader
else:
print ("Cannot recognize the dataset type")
assert(False)
return (dataset_train, dataset_val, dataset_test, data_loader)
def seed_torch(seed=21):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def pre_train(opt, dataset_train, dataset_val, dataset_test, data_loader):
data_loader_pre = torch.utils.data.DataLoader
# Dataloader of Gidaris & Komodakis (CVPR 2018)
if opt.use_trainval == 'True':
train_way = 80
dloader_train = data_loader_pre(
dataset=dataset_trainval,
batch_size=128,
shuffle=True,
num_workers=4
)
else:
train_way = 64
dloader_train = data_loader_pre(
dataset=dataset_train,
batch_size=128,
shuffle=True,
num_workers=4
)
dloader_val = data_loader(
dataset=dataset_val,
nKnovel=opt.test_way,
nKbase=0,
nExemplars=opt.val_shot, # num training examples per novel category
nTestNovel=opt.val_query * opt.test_way, # num test examples for all the novel categories
nTestBase=0, # num test examples for all the base categories
batch_size=1,
num_workers=0,
epoch_size=1 * opt.val_episode, # num of batches per epoch
)
set_gpu(opt.gpu)
check_dir('./experiments/')
check_dir(opt.save_path)
log_file_path = os.path.join(opt.save_path, "train_log.txt")
log(log_file_path, str(vars(opt)))
(embedding_net, propa_head, pre_head, cls_head) = get_model(opt)
print(list(dict(propa_head.named_parameters()).keys()))
optimizer = torch.optim.SGD([{'params': embedding_net.parameters()},
{'params': pre_head.parameters()}], lr=0.1, momentum=0.9, \
weight_decay=5e-4, nesterov=True)
lambda_epoch = lambda e: 1.0 if e < 60 else (0.1 if e < 80 else 0.01 if e < 90 else (0.001))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_epoch, last_epoch=-1)
max_val_acc = 0.0
max_test_acc = 0.0
timer = Timer()
x_entropy = torch.nn.CrossEntropyLoss()
for epoch in range(1, opt.num_epoch + 1):
# Train on the training split
lr_scheduler.step()
# Fetch the current epoch's learning rate
epoch_learning_rate = 0.1
for param_group in optimizer.param_groups:
epoch_learning_rate = param_group['lr']
log(log_file_path, 'Train Epoch: {}\tLearning Rate: {:.4f}'.format(
epoch, epoch_learning_rate))
_, _, _, _ = [x.train() for x in (embedding_net, propa_head, pre_head, cls_head)]
train_accuracies = []
train_losses = []
for i, batch in enumerate(tqdm(dloader_train), 1):
data, labels = [x.cuda() for x in batch]
if opt.pre_head == 'LinearNet' or opt.pre_head == 'CosineNet':
emb = embedding_net(data)
logit = pre_head(emb)
smoothed_one_hot = one_hot(labels.reshape(-1), train_way)
smoothed_one_hot = smoothed_one_hot * (1 - opt.eps) + (1 - smoothed_one_hot) * opt.eps / (train_way - 1)
log_prb = F.log_softmax(logit.reshape(-1, train_way), dim=1)
loss = -(smoothed_one_hot * log_prb).sum(dim=1)
loss = loss.mean()
acc = count_accuracy(logit.reshape(-1, train_way), labels.reshape(-1))
elif opt.pre_head == 'LinearRotateNet' or opt.pre_head == 'DistRotateNet':
x_ = []
y_ = []
a_ = []
for j in range(data.shape[0]):
x90 = data[j].transpose(2, 1).flip(1)
x180 = x90.transpose(2, 1).flip(1)
x270 = x180.transpose(2, 1).flip(1)
x_ += [data[j], x90, x180, x270]
y_ += [labels[j] for _ in range(4)]
a_ += [torch.tensor(0), torch.tensor(1), torch.tensor(2), torch.tensor(3)]
x_ = Variable(torch.stack(x_, 0)).cuda()
y_ = Variable(torch.stack(y_, 0)).cuda()
a_ = Variable(torch.stack(a_, 0)).cuda()
emb = embedding_net(x_)
# print(emb.shape)
logit = pre_head(emb, use_cls=True)
logit_rotate = pre_head(emb, use_cls=False)
smoothed_one_hot = one_hot(y_.reshape(-1), train_way)
smoothed_one_hot = smoothed_one_hot * (1 - opt.eps) + (1 - smoothed_one_hot) * opt.eps / (train_way - 1)
log_prb = F.log_softmax(logit.reshape(-1, train_way), dim=1)
loss = -(smoothed_one_hot * log_prb).sum(dim=1)
loss = loss.mean()
rloss = F.cross_entropy(input=logit_rotate, target=a_)
loss = 0.5 * loss + 0.5 * rloss
acc = count_accuracy(logit.reshape(-1, train_way), y_.reshape(-1))
else:
print("Cannot recognize the pre_head type")
assert (False)
train_accuracies.append(acc.item())
train_losses.append(loss.item())
if (i % 100 == 0):
train_acc_avg = np.mean(np.array(train_accuracies))
log(log_file_path, 'Train Epoch: {}\tBatch: [{}]\tLoss: {}\tAccuracy: {} % ({} %)'.format(
epoch, i, loss.item(), train_acc_avg, acc))
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Evaluate on the validation split
_, _, _, _ = [x.eval() for x in (embedding_net, propa_head, pre_head, cls_head)]
val_accuracies = []
val_losses = []
for i, batch in enumerate(tqdm(dloader_val(opt.seed)), 1):
data_support, labels_support, \
data_query, labels_query, _, _ = [
x.cuda() for x in batch]
test_n_support = opt.test_way * opt.val_shot
test_n_query = opt.test_way * opt.val_query
emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))
emb_support = emb_support.reshape(1, test_n_support, -1)
emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))
emb_query = emb_query.reshape(1, test_n_query, -1)
logit_query = cls_head(emb_query, emb_support, labels_support, opt.test_way, opt.val_shot)
loss = x_entropy(logit_query.reshape(-1, opt.test_way), labels_query.reshape(-1))
acc = count_accuracy(logit_query.reshape(-1, opt.test_way), labels_query.reshape(-1))
val_accuracies.append(acc.item())
val_losses.append(loss.item())
val_acc_avg = np.mean(np.array(val_accuracies))
val_acc_ci95 = 1.96 * np.std(np.array(val_accuracies)) / np.sqrt(opt.val_episode)
val_loss_avg = np.mean(np.array(val_losses))
if val_acc_avg > max_val_acc:
max_val_acc = val_acc_avg
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()}, \
os.path.join(opt.save_path, 'best_pretrain_model.pth'))
log(log_file_path, 'Validation Epoch: {}\t\t\tLoss: {:.4f}\tAccuracy: {:.2f} ± {:.2f} % (Best)' \
.format(epoch, val_loss_avg, val_acc_avg, val_acc_ci95))
else:
log(log_file_path, 'Validation Epoch: {}\t\t\tLoss: {:.4f}\tAccuracy: {:.2f} ± {:.2f} %' \
.format(epoch, val_loss_avg, val_acc_avg, val_acc_ci95))
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()} \
, os.path.join(opt.save_path, 'last_pretrain_epoch.pth'))
if epoch % opt.save_epoch == 0:
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()} \
, os.path.join(opt.save_path, 'epoch_{}_pretrain.pth'.format(epoch)))
def part_prototype(opt, dataset_train, dataset_val, dataset_test, data_loader):
data_loader_pre = torch.utils.data.DataLoader
# Dataloader of Gidaris & Komodakis (CVPR 2018)
dloader_train = data_loader_pre(
dataset=dataset_train,
batch_size=1,
shuffle=False,
num_workers=0
)
set_gpu(opt.gpu)
check_dir('./experiments/')
check_dir(opt.save_path)
log_file_path = os.path.join(opt.save_path, "train_log.txt")
log(log_file_path, str(vars(opt)))
(embedding_net, propa_head, pre_head, cls_head) = get_model(opt)
# Load saved model checkpoints
saved_models = torch.load(os.path.join(opt.save_path, 'best_pretrain_model.pth'))
embedding_net.load_state_dict(saved_models['embedding'])
embedding_net.eval()
embs = []
for i, batch in enumerate(tqdm(dloader_train), 1):
data, labels = [x.cuda() for x in batch]
with torch.no_grad():
emb = embedding_net(data)
embs.append(emb)
embs = torch.cat(embs, dim=0)
with open('./data/mini_imagenet_part_prior_train.pickle', 'rb') as handle:
part_prior = pickle.load(handle)
train_class_name_file = './data/mini_imagenet_catname2label_train.pickle'
with open(train_class_name_file, 'rb') as handle:
catname2label_train = pickle.load(handle)
a = 1
attr_feature = {}
for attr_id in part_prior['attribute_id_class_dict'].keys():
if attr_id not in [part_prior['wnids2id'][wnid] for wnid in part_prior['all_wnids']]:
attr_im_id = []
for sel_class_id in list(set(part_prior['attribute_id_class_dict'][attr_id])):
if sel_class_id in [part_prior['wnids2id'][wnid] for wnid in part_prior['wnids_train']]:
sel_class = catname2label_train[part_prior['id2wnids'][sel_class_id]]
attr_im_id.extend(dataset_train.label2ind[sel_class])
attr_im = embs[attr_im_id, :]
mean = torch.mean(attr_im, dim=0).unsqueeze(dim=0)
std = torch.std(attr_im, dim=0).unsqueeze(dim=0)
attr_feature[attr_id] = {'mean': mean, 'std':std}
with open(os.path.join(opt.save_path, "mini_imagenet_metapart_feature.pickle"), 'wb') as handle:
pickle.dump(attr_feature, handle, protocol=pickle.HIGHEST_PROTOCOL)
class_feature = {}
for class_id in part_prior['class_attribute_id_dict'].keys():
if class_id in [part_prior['wnids2id'][wnid] for wnid in part_prior['wnids_train']]:
sel_class = catname2label_train[part_prior['id2wnids'][class_id]]
class_im = embs[dataset_train.label2ind[sel_class], :]
mean = torch.mean(class_im, dim=0).unsqueeze(dim=0)
std = torch.std(class_im, dim=0).unsqueeze(dim=0)
class_feature[sel_class] = {'mean': mean, 'std':std}
with open(os.path.join(opt.save_path, "mini_imagenet_class_feature.pickle"), 'wb') as handle:
pickle.dump(class_feature, handle, protocol=pickle.HIGHEST_PROTOCOL)
def meta_inference(opt, dataset_train, dataset_val, dataset_test, data_loader):
data_loader_pre = torch.utils.data.DataLoader
# Dataloader of Gidaris & Komodakis (CVPR 2018)
dloader_train = data_loader_pre(
dataset=dataset_train,
batch_size=128,
shuffle=True,
num_workers=0
)
dloader_val = data_loader(
dataset=dataset_val,
nKnovel=opt.test_way,
nKbase=0,
nExemplars=opt.val_shot, # num training examples per novel category
nTestNovel=opt.val_query * opt.test_way, # num test examples for all the novel categories
nTestBase=0, # num test examples for all the base categories
batch_size=1,
num_workers=0,
epoch_size=1 * opt.val_episode, # num of batches per epoch
)
set_gpu(opt.gpu)
check_dir('./experiments/')
check_dir(opt.save_path)
log_file_path = os.path.join(opt.save_path, "train_log.txt")
log(log_file_path, str(vars(opt)))
(embedding_net, propa_head, pre_head, cls_head) = get_model(opt)
# Load saved model checkpoints
saved_models = torch.load(os.path.join(opt.save_path, 'best_pretrain_model.pth'))
embedding_net.load_state_dict(saved_models['embedding'])
embedding_net.eval()
cls_head.eval()
optimizer = torch.optim.SGD([{'params': propa_head.parameters()}], lr=0.1, momentum=0.9, \
weight_decay=5e-4, nesterov=True)
lambda_epoch = lambda e: 1.0 if e < 15 else (0.1 if e < 40 else 0.01 if e < 80 else (0.001))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_epoch, last_epoch=-1)
train_losses = []
x_entropy = torch.nn.CrossEntropyLoss()
max_loss = 10e16
max_val_acc = 0
max_test_acc = 0
for epoch in range(0, opt.num_epoch + 1):
# Train on the training split
lr_scheduler.step()
# Fetch the current epoch's learning rate
epoch_learning_rate = 0.1
for param_group in optimizer.param_groups:
epoch_learning_rate = param_group['lr']
log(log_file_path, 'Train Epoch: {}\tLearning Rate: {:.4f}'.format(
epoch, epoch_learning_rate))
propa_head.train()
train_accuracies = []
for i, batch in enumerate(tqdm(dloader_train), 1):
data, labels = [x.cuda() for x in batch]
nb, ns, nc, nw, nh = data.shape
with torch.no_grad():
data = data.reshape(nb*ns, nc, nw, nh)
emb = embedding_net(data)
emb = emb.reshape(nb, ns, -1)
emb = emb.mean(dim=1)
proto, proto_true = propa_head(emb, labels)
loss = F.mse_loss(proto, proto_true)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_losses.append(loss.item())
if (i % 10 == 0):
train_loss_avg = np.mean(np.array(train_losses))
log(log_file_path, 'Train Epoch: {}\tBatch: [{}]\tLoss: {}({})'.format(
epoch, i, loss.item(), train_loss_avg))
# Evaluate on the validation split
_, _, _, _ = [x.eval() for x in (embedding_net, propa_head, pre_head, cls_head)]
val_accuracies = []
val_losses = []
for i, batch in enumerate(tqdm(dloader_val(opt.seed)), 1):
data_support, labels_support, \
data_query, labels_query, k_all, _ = [
x.cuda() for x in batch]
test_n_support = opt.test_way * opt.val_shot
test_n_query = opt.test_way * opt.val_query
with torch.no_grad():
emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))
emb_support = emb_support.reshape(1, test_n_support, -1)
emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))
emb_query = emb_query.reshape(1, test_n_query, -1)
logit_query = cls_head(k_all, propa_head, emb_query, emb_support, labels_support, opt.test_way, opt.val_shot, is_scale=True)
loss = x_entropy(logit_query.reshape(-1, opt.test_way), labels_query.reshape(-1))
acc = count_accuracy(logit_query.reshape(-1, opt.test_way), labels_query.reshape(-1))
val_accuracies.append(acc.item())
val_losses.append(loss.item())
val_acc_avg = np.mean(np.array(val_accuracies))
val_acc_ci95 = 1.96 * np.std(np.array(val_accuracies)) / np.sqrt(opt.val_episode)
val_loss_avg = np.mean(np.array(val_losses))
if val_acc_avg > max_val_acc:
max_val_acc = val_acc_avg
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()}, \
os.path.join(opt.save_path, 'best_pretrain_model_meta_infer_val_{}w_{}s_{}.pth'.format(opt.test_way, opt.val_shot, opt.head)))
log(log_file_path, 'Validation Epoch: {}\t\t\tLoss: {:.4f}\tAccuracy: {:.2f} ± {:.2f} % (Best)' \
.format(epoch, val_loss_avg, val_acc_avg, val_acc_ci95))
else:
log(log_file_path, 'Validation Epoch: {}\t\t\tLoss: {:.4f}\tAccuracy: {:.2f} ± {:.2f} %' \
.format(epoch, val_loss_avg, val_acc_avg, val_acc_ci95))
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()} \
, os.path.join(opt.save_path, 'last_pretrain_epoch_meta_infer.pth'))
if epoch % opt.save_epoch == 0:
torch.save({'embedding': embedding_net.state_dict(), 'propa_head': propa_head.state_dict(),
'pre_head': pre_head.state_dict(), 'cls_head': cls_head.state_dict()} \
, os.path.join(opt.save_path, 'epoch_{}_pretrain_meta_infer.pth'.format(epoch)))
def meta_train(opt, dataset_train, dataset_val, dataset_test, data_loader):
# Dataloader of Gidaris & Komodakis (CVPR 2018)
dloader_train = data_loader(
dataset=dataset_train,
nKnovel=opt.train_way,
nKbase=0,
nExemplars=opt.train_shot, # num training examples per novel category
nTestNovel=opt.train_way * opt.train_query, # num test examples for all the novel categories
nTestBase=0, # num test examples for all the base categories
batch_size=opt.episodes_per_batch,
num_workers=4,
epoch_size=opt.episodes_per_batch * 100, # num of batches per epoch
)
dloader_val = data_loader(
dataset=dataset_val,
nKnovel=opt.test_way,
nKbase=0,
nExemplars=opt.val_shot, # num training examples per novel category
nTestNovel=opt.val_query * opt.test_way, # num test examples for all the novel categories
nTestBase=0, # num test examples for all the base categories
batch_size=1,
num_workers=0,
epoch_size=1 * opt.val_episode, # num of batches per epoch
)
set_gpu(opt.gpu)
check_dir('./experiments/')
check_dir(opt.save_path)
log_file_path = os.path.join(opt.save_path, "train_log.txt")
log(log_file_path, str(vars(opt)))
(embedding_net, propa_head, pre_head, cls_head) = get_model(opt)
# Load saved model checkpoints
saved_models = torch.load(os.path.join(opt.save_path, 'best_pretrain_model_meta_infer_val_{}w_{}s_{}.pth'.format(opt.test_way, opt.val_shot, opt.head)))
embedding_net.load_state_dict(saved_models['embedding'])
embedding_net.eval()
propa_head.load_state_dict(saved_models['propa_head'])
propa_head.eval()
optimizer = torch.optim.SGD([{'params': embedding_net.parameters()},
{'params': propa_head.parameters()},
{'params': cls_head.parameters()}], lr=0.0001, momentum=0.9, \
weight_decay=5e-4, nesterov=True)
lambda_epoch = lambda e: 1.0 if e < 15 else (0.1 if e < 25 else 0.01 if e < 30 else (0.001))
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_epoch, last_epoch=-1)
max_val_acc = 0.0
max_test_acc = 0.0
timer = Timer()
x_entropy = torch.nn.CrossEntropyLoss()
for epoch in range(0, opt.num_epoch + 1):
if epoch != 0:
# Train on the training split
lr_scheduler.step()
# Fetch the current epoch's learning rate
epoch_learning_rate = 0.1
for param_group in optimizer.param_groups:
epoch_learning_rate = param_group['lr']
log(log_file_path, 'Train Epoch: {}\tLearning Rate: {:.4f}'.format(
epoch, epoch_learning_rate))
_, _, _ = [x.train() for x in (embedding_net, propa_head, cls_head)]
train_accuracies = []
train_losses = []
for i, batch in enumerate(tqdm(dloader_train(epoch)), 1):
data_support, labels_support, \
data_query, labels_query, k_all, _ = [
x.cuda() for x in batch]
train_n_support = opt.train_way * opt.train_shot
train_n_query = opt.train_way * opt.train_query
emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))
emb_support = emb_support.reshape(opt.episodes_per_batch, train_n_support, -1)
emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))
emb_query = emb_query.reshape(opt.episodes_per_batch, train_n_query, -1)
logit_query = cls_head(k_all, propa_head, emb_query, emb_support, labels_support, opt.train_way, opt.train_shot, is_scale=False)
smoothed_one_hot = one_hot(labels_query.reshape(-1), opt.train_way)
smoothed_one_hot = smoothed_one_hot * (1 - opt.eps) + (1 - smoothed_one_hot) * opt.eps / (opt.train_way - 1)
log_prb = F.log_softmax(logit_query.reshape(-1, opt.train_way), dim=1)
loss = -(smoothed_one_hot * log_prb).sum(dim=1)
loss = loss.mean()
acc = count_accuracy(logit_query.reshape(-1, opt.train_way), labels_query.reshape(-1))
train_accuracies.append(acc.item())
train_losses.append(loss.item())
if (i % 10 == 0):
train_acc_avg = np.mean(np.array(train_accuracies))
log(log_file_path, 'Train Epoch: {}\tBatch: [{}]\tLoss: {}\tAccuracy: {} % ({} %)'.format(
epoch, i, loss.item(), train_acc_avg, acc))
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Evaluate on the validation split
_, _, _ = [x.eval() for x in (embedding_net, propa_head, cls_head)]
val_accuracies = []
val_losses = []
for i, batch in enumerate(tqdm(dloader_val(opt.seed)), 1):
data_support, labels_support, \
data_query, labels_query, k_all, _ = [
x.cuda() for x in batch]
test_n_support = opt.test_way * opt.val_shot
test_n_query = opt.test_way * opt.val_query
with torch.no_grad():
emb_support = embedding_net(data_support.reshape([-1] + list(data_support.shape[-3:])))
emb_support = emb_support.reshape(1, test_n_support, -1)
emb_query = embedding_net(data_query.reshape([-1] + list(data_query.shape[-3:])))
emb_query = emb_query.reshape(1, test_n_query, -1)
logit_query = cls_head(k_all, propa_head, emb_query, emb_support, labels_support, opt.test_way, opt.val_shot, is_scale=True)
loss = x_entropy(logit_query.reshape(-1, opt.test_way), labels_query.reshape(-1))
acc = count_accuracy(logit_query.reshape(-1, opt.test_way), labels_query.reshape(-1))
val_accuracies.append(acc.item())
val_losses.append(loss.item())
val_acc_avg = np.mean(np.array(val_accuracies))
val_acc_ci95 = 1.96 * np.std( | np.array(val_accuracies) | numpy.array |
from __future__ import print_function
import sys
import numpy
import os
import glob
import pickle as cPickle
import signal
import csv
import ntpath
from pyAudioAnalysis import audioFeatureExtraction as aF
from pyAudioAnalysis import audioBasicIO
from scipy import linalg as la
from scipy.spatial import distance
import sklearn.svm
import sklearn.decomposition
import sklearn.ensemble
def signal_handler(signal, frame):
print('You pressed Ctrl+C! - EXIT')
os.system("stty -cbreak echo")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
shortTermWindow = 0.050
shortTermStep = 0.050
eps = 0.00000001
class kNN:
def __init__(self, X, Y, k):
self.X = X
self.Y = Y
self.k = k
def classify(self, testSample):
nClasses = numpy.unique(self.Y).shape[0]
YDist = (distance.cdist(self.X, testSample.reshape(1, testSample.shape[0]), 'euclidean')).T
iSort = numpy.argsort(YDist)
P = numpy.zeros((nClasses,))
for i in range(nClasses):
P[i] = numpy.nonzero(self.Y[iSort[0][0:self.k]] == i)[0].shape[0] / float(self.k)
return (numpy.argmax(P), P)
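# Hedged usage sketch (illustrative only, never called by the library): build a tiny
# two-class kNN model from made-up feature vectors and classify one new sample. Real
# feature vectors come from aF.mtFeatureExtraction(), not from hand-written numbers.
def _knn_usage_example():
    X = numpy.array([[0.0, 0.1], [0.2, 0.0], [1.0, 1.1], [0.9, 1.0]])   # 4 samples x 2 dims
    Y = numpy.array([0, 0, 1, 1])                                       # class labels
    model = kNN(X, Y, 3)
    winner, probabilities = model.classify(numpy.array([0.95, 1.05]))
    return winner, probabilities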
def classifierWrapper(classifier, classifierType, testSample):
'''
This function is used as a wrapper to pattern classification.
ARGUMENTS:
- classifier: a classifier object of type sklearn.svm.SVC or kNN (defined in this library) or sklearn.ensemble.RandomForestClassifier or sklearn.ensemble.GradientBoostingClassifier or sklearn.ensemble.ExtraTreesClassifier
        - classifierType:    "svm" or "knn" or "randomforest" or "gradientboosting" or "extratrees"
- testSample: a feature vector (numpy array)
RETURNS:
- R: class ID
- P: probability estimate
EXAMPLE (for some audio signal stored in array x):
import audioFeatureExtraction as aF
import audioTrainTest as aT
# load the classifier (here SVM, for kNN use loadKNNModel instead):
[Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep] = aT.loadSVModel(modelName)
# mid-term feature extraction:
[MidTermFeatures, _] = aF.mtFeatureExtraction(x, Fs, mtWin * Fs, mtStep * Fs, round(Fs*stWin), round(Fs*stStep));
# feature normalization:
curFV = (MidTermFeatures[:, i] - MEAN) / STD;
# classification
[Result, P] = classifierWrapper(Classifier, modelType, curFV)
'''
R = -1
P = -1
if classifierType == "knn":
[R, P] = classifier.classify(testSample)
elif classifierType == "svm" or classifierType == "randomforest" or classifierType == "gradientboosting" or "extratrees":
R = classifier.predict(testSample.reshape(1,-1))[0]
P = classifier.predict_proba(testSample.reshape(1,-1))[0]
return [R, P]
def regressionWrapper(model, modelType, testSample):
'''
    This function is used as a wrapper to pattern regression.
ARGUMENTS:
- model: regression model
        - modelType:        "svm" or "svm_rbf" or "randomforest" ("knn" is TODO)
- testSample: a feature vector (numpy array)
RETURNS:
- R: regression result (estimated value)
EXAMPLE (for some audio signal stored in array x):
TODO
'''
if modelType == "svm" or modelType == "randomforest" or modelType == "svm_rbf":
return (model.predict(testSample.reshape(1,-1))[0])
# elif classifierType == "knn":
# TODO
return None
def randSplitFeatures(features, partTrain):
'''
    This function randomly splits a feature set into training and testing data.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features.
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - partTrain:        percentage of samples (per class) kept for training, in the range (0, 1)
RETURNS:
- featuresTrains: a list of training data for each class
- featuresTest: a list of testing data for each class
'''
featuresTrain = []
featuresTest = []
for i, f in enumerate(features):
[numOfSamples, numOfDims] = f.shape
randperm = numpy.random.permutation(range(numOfSamples))
nTrainSamples = int(round(partTrain * numOfSamples))
featuresTrain.append(f[randperm[0:nTrainSamples]])
featuresTest.append(f[randperm[nTrainSamples::]])
return (featuresTrain, featuresTest)
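# Hedged example (illustrative only, never called): split two made-up classes 80/20.
# In the real pipeline `features` comes from aF.dirsWavFeatureExtraction().
def _rand_split_example():
    features = [numpy.random.rand(10, 34), numpy.random.rand(12, 34)]
    featuresTrain, featuresTest = randSplitFeatures(features, 0.80)
    # e.g. featuresTrain[0] holds 8 of the 10 samples of class 0, featuresTest[0] the rest.
    return featuresTrain, featuresTest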
def trainKNN(features, K):
'''
Train a kNN classifier.
ARGUMENTS:
- features: a list ([numOfClasses x 1]) whose elements containt numpy matrices of features.
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
- K: parameter K
RETURNS:
- kNN: the trained kNN variable
'''
[Xt, Yt] = listOfFeatures2Matrix(features)
knn = kNN(Xt, Yt, K)
return knn
def trainSVM(features, Cparam):
'''
    Train a multi-class probabilistic SVM classifier.
Note: This function is simply a wrapper to the sklearn functionality for SVM training
    See function featureAndTrain() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes.
ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
- Cparam: SVM parameter C (cost of constraints violation)
RETURNS:
- svm: the trained SVM variable
NOTE:
This function trains a linear-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided.
'''
[X, Y] = listOfFeatures2Matrix(features)
svm = sklearn.svm.SVC(C = Cparam, kernel = 'linear', probability = True)
svm.fit(X,Y)
return svm
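# Hedged example (illustrative only, never called): train a linear SVM on two made-up
# classes and classify one vector through classifierWrapper(). In featureAndTrain() the
# features are first normalized with normalizeFeatures(); that step is skipped here.
def _train_svm_example():
    features = [numpy.random.rand(20, 10), numpy.random.rand(20, 10) + 1.0]
    svm_model = trainSVM(features, 1.0)
    result, probabilities = classifierWrapper(svm_model, "svm", numpy.random.rand(10))
    return result, probabilities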
def trainSVM_RBF(features, Cparam):
'''
    Train a multi-class probabilistic SVM classifier.
Note: This function is simply a wrapper to the sklearn functionality for SVM training
    See function featureAndTrain() to use a wrapper on both the feature extraction and the SVM training (and parameter tuning) processes.
ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
- Cparam: SVM parameter C (cost of constraints violation)
RETURNS:
- svm: the trained SVM variable
NOTE:
        This function trains an RBF-kernel SVM for a given C value. For a different kernel, other types of parameters should be provided.
'''
[X, Y] = listOfFeatures2Matrix(features)
svm = sklearn.svm.SVC(C = Cparam, kernel = 'rbf', probability = True)
svm.fit(X,Y)
return svm
def trainRandomForest(features, n_estimators):
'''
    Train a multi-class random forest classifier.
    Note: This function is simply a wrapper to the sklearn functionality for random forest training
    See function featureAndTrain() to use a wrapper on both the feature extraction and the classifier training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - n_estimators:     number of trees in the forest
    RETURNS:
        - rf:               the trained random forest classifier
'''
[X, Y] = listOfFeatures2Matrix(features)
rf = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators)
rf.fit(X,Y)
return rf
def trainGradientBoosting(features, n_estimators):
'''
    Train a multi-class gradient boosting classifier.
    Note: This function is simply a wrapper to the sklearn functionality for gradient boosting training
    See function featureAndTrain() to use a wrapper on both the feature extraction and the classifier training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - n_estimators:     number of boosting stages
    RETURNS:
        - gb:               the trained gradient boosting classifier
'''
[X, Y] = listOfFeatures2Matrix(features)
rf = sklearn.ensemble.GradientBoostingClassifier(n_estimators = n_estimators)
rf.fit(X,Y)
return rf
def trainExtraTrees(features, n_estimators):
'''
    Train an extra trees classifier.
    Note: This function is simply a wrapper to the sklearn functionality for extra trees training
    See function featureAndTrain() to use a wrapper on both the feature extraction and the classifier training (and parameter tuning) processes.
    ARGUMENTS:
        - features:         a list ([numOfClasses x 1]) whose elements contain numpy matrices of features
                            each matrix features[i] of class i is [numOfSamples x numOfDimensions]
        - n_estimators:     number of trees in the forest
    RETURNS:
        - et:               the trained extra trees classifier
'''
[X, Y] = listOfFeatures2Matrix(features)
et = sklearn.ensemble.ExtraTreesClassifier(n_estimators = n_estimators)
et.fit(X,Y)
return et
def trainSVMregression(Features, Y, Cparam):
svm = sklearn.svm.SVR(C = Cparam, kernel = 'linear')
svm.fit(Features,Y)
trainError = numpy.mean(numpy.abs(svm.predict(Features) - Y))
return svm, trainError
def trainSVMregression_rbf(Features, Y, Cparam):
svm = sklearn.svm.SVR(C = Cparam, kernel = 'rbf')
svm.fit(Features,Y)
trainError = numpy.mean(numpy.abs(svm.predict(Features) - Y))
return svm, trainError
def trainRandomForestRegression(Features, Y, n_estimators):
rf = sklearn.ensemble.RandomForestRegressor(n_estimators = n_estimators)
rf.fit(Features,Y)
trainError = numpy.mean(numpy.abs(rf.predict(Features) - Y))
return rf, trainError
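# Hedged example (illustrative only, never called): fit an SVR on made-up features/targets
# and query it through regressionWrapper(). Real targets come from the per-file CSVs read
# by featureAndTrainRegression().
def _train_regression_example():
    Features = numpy.random.rand(30, 5)
    Y = 2.0 * Features[:, 0]
    model, trainError = trainSVMregression(Features, Y, 1.0)
    return regressionWrapper(model, "svm", numpy.random.rand(5))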
def featureAndTrain(listOfDirs, mtWin, mtStep, stWin, stStep, classifierType, modelName, computeBEAT=False, perTrain=0.90):
'''
This function is used as a wrapper to segment-based audio feature extraction and classifier training.
ARGUMENTS:
        listOfDirs:        list of paths of directories. Each directory contains a single audio class whose samples are stored in separate WAV files.
mtWin, mtStep: mid-term window length and step
stWin, stStep: short-term window and step
classifierType: "svm" or "knn" or "randomforest" or "gradientboosting" or "extratrees"
modelName: name of the model to be saved
RETURNS:
None. Resulting classifier along with the respective model parameters are saved on files.
'''
# STEP A: Feature Extraction:
[features, classNames, _] = aF.dirsWavFeatureExtraction(listOfDirs, mtWin, mtStep, stWin, stStep, computeBEAT=computeBEAT)
if len(features) == 0:
print("trainSVM_feature ERROR: No data found in any input folder!")
return
numOfFeatures = features[0].shape[1]
featureNames = ["features" + str(d + 1) for d in range(numOfFeatures)]
writeTrainDataToARFF(modelName, features, classNames, featureNames)
for i, f in enumerate(features):
if len(f) == 0:
print("trainSVM_feature ERROR: " + listOfDirs[i] + " folder is empty or non-existing!")
return
# STEP B: Classifier Evaluation and Parameter Selection:
if classifierType == "svm" or classifierType == "svm_rbf":
classifierParams = numpy.array([0.001, 0.01, 0.5, 1.0, 5.0, 10.0, 20.0])
elif classifierType == "randomforest":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
elif classifierType == "knn":
classifierParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15])
elif classifierType == "gradientboosting":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
elif classifierType == "extratrees":
classifierParams = numpy.array([10, 25, 50, 100,200,500])
    # get optimal classifier parameter:
features2 = []
for f in features:
fTemp = []
for i in range(f.shape[0]):
temp = f[i,:]
if (not numpy.isnan(temp).any()) and (not numpy.isinf(temp).any()) :
fTemp.append(temp.tolist())
else:
print("NaN Found! Feature vector not used for training")
features2.append(numpy.array(fTemp))
features = features2
bestParam = evaluateClassifier(features, classNames, 100, classifierType, classifierParams, 0, perTrain)
print("Selected params: {0:.5f}".format(bestParam))
C = len(classNames)
[featuresNorm, MEAN, STD] = normalizeFeatures(features) # normalize features
MEAN = MEAN.tolist()
STD = STD.tolist()
featuresNew = featuresNorm
# STEP C: Save the classifier to file
if classifierType == "svm":
Classifier = trainSVM(featuresNew, bestParam)
elif classifierType == "svm_rbf":
Classifier = trainSVM_RBF(featuresNew, bestParam)
elif classifierType == "randomforest":
Classifier = trainRandomForest(featuresNew, bestParam)
elif classifierType == "gradientboosting":
Classifier = trainGradientBoosting(featuresNew, bestParam)
elif classifierType == "extratrees":
Classifier = trainExtraTrees(featuresNew, bestParam)
if classifierType == "knn":
[X, Y] = listOfFeatures2Matrix(featuresNew)
X = X.tolist()
Y = Y.tolist()
fo = open(modelName, "wb")
cPickle.dump(X, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(Y, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(MEAN, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(STD, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(classNames, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(bestParam, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(stWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(stStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(computeBEAT, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
elif classifierType == "svm" or classifierType == "svm_rbf" or classifierType == "randomforest" or classifierType == "gradientboosting" or classifierType == "extratrees":
with open(modelName, 'wb') as fid: # save to file
cPickle.dump(Classifier, fid)
fo = open(modelName + "MEANS", "wb")
cPickle.dump(MEAN, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(STD, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(classNames, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(stWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(stStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(computeBEAT, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
def featureAndTrainRegression(dirName, mtWin, mtStep, stWin, stStep, modelType, modelName, computeBEAT=False):
'''
This function is used as a wrapper to segment-based audio feature extraction and classifier training.
ARGUMENTS:
dirName: path of directory containing the WAV files and Regression CSVs
mtWin, mtStep: mid-term window length and step
stWin, stStep: short-term window and step
modelType: "svm" or "knn" or "randomforest"
modelName: name of the model to be saved
RETURNS:
None. Resulting regression model along with the respective model parameters are saved on files.
'''
# STEP A: Feature Extraction:
    [features, _, fileNames] = aF.dirsWavFeatureExtraction([dirName], mtWin, mtStep,
                                                           stWin, stStep,
                                                           computeBEAT=computeBEAT)
features = features[0]
fileNames = [ntpath.basename(f) for f in fileNames[0]]
featuresFinal = []
# Read CSVs:
CSVs = glob.glob(dirName + os.sep + "*.csv")
regressionLabels = []
regressionNames = []
featuresFinal = []
for c in CSVs: # for each CSV
curRegressionLabels = []
featuresTemp = []
with open(c, 'rt') as csvfile: # open the csv file that contains the current target value's annotations
CSVreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in CSVreader:
if len(row) == 2: # if the current row contains two fields (filename, target value)
if row[0] in fileNames: # ... and if the current filename exists in the list of filenames
index = fileNames.index(row[0])
curRegressionLabels.append(float(row[1]))
featuresTemp.append(features[index,:])
else:
print("Warning: {} not found in list of files.".format(row[0]))
else:
print("Warning: Row with unknown format in regression file")
featuresFinal.append(numpy.array(featuresTemp))
regressionLabels.append(numpy.array(curRegressionLabels)) # curRegressionLabels is the list of values for the current regression problem
regressionNames.append(ntpath.basename(c).replace(".csv", "")) # regression task name
if len(features) == 0:
print("ERROR: No data found in any input folder!")
return
numOfFeatures = featuresFinal[0].shape[1]
    # TODO: ARFF WRITE????
# STEP B: Classifier Evaluation and Parameter Selection:
if modelType == "svm" or modelType == "svm_rbf":
modelParams = numpy.array([0.001, 0.005, 0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 5.0, 10.0])
elif modelType == "randomforest":
modelParams = numpy.array([5, 10, 25, 50, 100])
# elif modelType == "knn":
# modelParams = numpy.array([1, 3, 5, 7, 9, 11, 13, 15]);
errors = []
errorsBase = []
bestParams = []
for iRegression, r in enumerate(regressionNames):
        # get optimal classifier parameter:
print("Regression task " + r)
bestParam, error, berror = evaluateRegression(featuresFinal[iRegression], regressionLabels[iRegression], 100, modelType, modelParams)
errors.append(error)
errorsBase.append(berror)
bestParams.append(bestParam)
print("Selected params: {0:.5f}".format(bestParam))
[featuresNorm, MEAN, STD] = normalizeFeatures([featuresFinal[iRegression]]) # normalize features
# STEP C: Save the model to file
if modelType == "svm":
Classifier, _ = trainSVMregression(featuresNorm[0], regressionLabels[iRegression], bestParam)
if modelType == "svm_rbf":
Classifier, _ = trainSVMregression_rbf(featuresNorm[0], regressionLabels[iRegression], bestParam)
if modelType == "randomforest":
Classifier, _ = trainRandomForestRegression(featuresNorm[0], regressionLabels[iRegression], bestParam)
if modelType == "svm" or modelType == "svm_rbf" or modelType == "randomforest":
with open(modelName + "_" + r, 'wb') as fid: # save to file
cPickle.dump(Classifier, fid)
fo = open(modelName + "_" + r + "MEANS", "wb")
cPickle.dump(MEAN, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(STD, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(mtStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(stWin, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(stStep, fo, protocol=cPickle.HIGHEST_PROTOCOL)
cPickle.dump(computeBEAT, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
return errors, errorsBase, bestParams
def loadKNNModel(kNNModelName, isRegression=False):
try:
fo = open(kNNModelName, "rb")
except IOError:
print("didn't find file")
return
try:
X = cPickle.load(fo)
Y = cPickle.load(fo)
MEAN = cPickle.load(fo)
STD = cPickle.load(fo)
if not isRegression:
classNames = cPickle.load(fo)
K = cPickle.load(fo)
mtWin = cPickle.load(fo)
mtStep = cPickle.load(fo)
stWin = cPickle.load(fo)
stStep = cPickle.load(fo)
computeBEAT = cPickle.load(fo)
except:
fo.close()
fo.close()
X = numpy.array(X)
Y = numpy.array(Y)
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
Classifier = kNN(X, Y, K) # Note: a direct call to the kNN constructor is used here
if isRegression:
return(Classifier, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadSVModel(SVMmodelName, isRegression=False):
'''
    This function loads an SVM model either for classification or regression.
    ARGUMENTS:
        - SVMmodelName:     the path of the model to be loaded
        - isRegression:     a flag indicating whether this is a regression model
'''
try:
fo = open(SVMmodelName+"MEANS", "rb")
except IOError:
print("Load SVM Model: Didn't find file")
return
try:
MEAN = cPickle.load(fo)
STD = cPickle.load(fo)
if not isRegression:
classNames = cPickle.load(fo)
mtWin = cPickle.load(fo)
mtStep = cPickle.load(fo)
stWin = cPickle.load(fo)
stStep = cPickle.load(fo)
computeBEAT = cPickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(SVMmodelName, 'rb') as fid:
SVM = cPickle.load(fid)
if isRegression:
return(SVM, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(SVM, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadRandomForestModel(RFmodelName, isRegression=False):
'''
    This function loads a random forest model either for classification or regression.
    ARGUMENTS:
        - RFmodelName:      the path of the model to be loaded
        - isRegression:     a flag indicating whether this is a regression model
'''
try:
fo = open(RFmodelName+"MEANS", "rb")
except IOError:
print("Load Random Forest Model: Didn't find file")
return
try:
MEAN = cPickle.load(fo)
STD = cPickle.load(fo)
if not isRegression:
classNames = cPickle.load(fo)
mtWin = cPickle.load(fo)
mtStep = cPickle.load(fo)
stWin = cPickle.load(fo)
stStep = cPickle.load(fo)
computeBEAT = cPickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(RFmodelName, 'rb') as fid:
RF = cPickle.load(fid)
if isRegression:
return(RF, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(RF, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadGradientBoostingModel(GBModelName, isRegression=False):
'''
    This function loads a gradient boosting model either for classification or regression.
    ARGUMENTS:
        - GBModelName:      the path of the model to be loaded
        - isRegression:     a flag indicating whether this is a regression model
'''
try:
fo = open(GBModelName+"MEANS", "rb")
except IOError:
print("Load Random Forest Model: Didn't find file")
return
try:
MEAN = cPickle.load(fo)
STD = cPickle.load(fo)
if not isRegression:
classNames = cPickle.load(fo)
mtWin = cPickle.load(fo)
mtStep = cPickle.load(fo)
stWin = cPickle.load(fo)
stStep = cPickle.load(fo)
computeBEAT = cPickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(GBModelName, 'rb') as fid:
GB = cPickle.load(fid)
if isRegression:
return(GB, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(GB, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def loadExtraTreesModel(ETmodelName, isRegression=False):
'''
    This function loads an extra trees model either for classification or regression.
    ARGUMENTS:
        - ETmodelName:      the path of the model to be loaded
        - isRegression:     a flag indicating whether this is a regression model
'''
try:
fo = open(ETmodelName+"MEANS", "rb")
except IOError:
print("Load Random Forest Model: Didn't find file")
return
try:
MEAN = cPickle.load(fo)
STD = cPickle.load(fo)
if not isRegression:
classNames = cPickle.load(fo)
mtWin = cPickle.load(fo)
mtStep = cPickle.load(fo)
stWin = cPickle.load(fo)
stStep = cPickle.load(fo)
computeBEAT = cPickle.load(fo)
except:
fo.close()
fo.close()
MEAN = numpy.array(MEAN)
STD = numpy.array(STD)
COEFF = []
with open(ETmodelName, 'rb') as fid:
GB = cPickle.load(fid)
if isRegression:
return(GB, MEAN, STD, mtWin, mtStep, stWin, stStep, computeBEAT)
else:
return(GB, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT)
def evaluateClassifier(features, ClassNames, nExp, ClassifierName, Params, parameterMode, perTrain=0.90):
'''
ARGUMENTS:
        features:       a list ([numOfClasses x 1]) whose elements contain numpy matrices of features.
each matrix features[i] of class i is [numOfSamples x numOfDimensions]
ClassNames: list of class names (strings)
nExp: number of cross-validation experiments
ClassifierName: svm or knn or randomforest
Params: list of classifier parameters (for parameter tuning during cross-validation)
parameterMode: 0: choose parameters that lead to maximum overall classification ACCURACY
1: choose parameters that lead to maximum overall F1 MEASURE
RETURNS:
bestParam: the value of the input parameter that optimizes the selected performance measure
'''
# feature normalization:
(featuresNorm, MEAN, STD) = normalizeFeatures(features)
#featuresNorm = features;
nClasses = len(features)
CAll = []
acAll = []
F1All = []
PrecisionClassesAll = []
RecallClassesAll = []
ClassesAll = []
F1ClassesAll = []
CMsAll = []
# compute total number of samples:
nSamplesTotal = 0
for f in features:
nSamplesTotal += f.shape[0]
if nSamplesTotal > 1000 and nExp > 50:
nExp = 50
print("Number of training experiments changed to 50 due to high number of samples")
if nSamplesTotal > 2000 and nExp > 10:
nExp = 10
print("Number of training experiments changed to 10 due to high number of samples")
for Ci, C in enumerate(Params): # for each param value
CM = | numpy.zeros((nClasses, nClasses)) | numpy.zeros |
import json
import sys
import os
import io
import copy
from collections import OrderedDict
import warnings
from typing import Optional
import numpy as np
import tiledb
from tiledb import TileDBError
# from tiledb.tests.common import xprint
if sys.version_info >= (3, 3):
unicode_type = str
else:
unicode_type = unicode
unicode_dtype = np.dtype(unicode_type)
def check_dataframe_deps():
pd_error = """Pandas version >= 1.0 required for dataframe functionality.
Please `pip install pandas>=1.0` to proceed."""
pa_error = """PyArrow version >= 1.0 is suggested for dataframe functionality.
Please `pip install pyarrow>=1.0`."""
from distutils.version import LooseVersion
try:
import pandas as pd
except ImportError:
raise Exception(pd_error)
if LooseVersion(pd.__version__) < LooseVersion("1.0"):
raise Exception(pd_error)
try:
import pyarrow as pa
if LooseVersion(pa.__version__) < LooseVersion("1.0"):
warnings.warn(pa_error)
except ImportError:
warnings.warn(pa_error)
# Note: 'None' is used to indicate optionality for many of these options
# For example, if the `sparse` argument is unspecified we will default
# to False (dense) unless the input has string or heterogenous indexes.
TILEDB_KWARG_DEFAULTS = {
"ctx": None,
"sparse": None,
"index_dims": None,
"allows_duplicates": True,
"mode": "ingest",
"attr_filters": None,
"dim_filters": None,
"coords_filters": None,
"full_domain": False,
"tile": None,
"row_start_idx": None,
"fillna": None,
"column_types": None,
"capacity": None,
"date_spec": None,
"cell_order": "row-major",
"tile_order": "row-major",
"timestamp": None,
"debug": None,
}
def parse_tiledb_kwargs(kwargs):
parsed_args = dict(TILEDB_KWARG_DEFAULTS)
for key in TILEDB_KWARG_DEFAULTS.keys():
if key in kwargs:
parsed_args[key] = kwargs.pop(key)
return parsed_args
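# Hedged example (illustrative only, never called): TileDB-specific options are popped off
# the caller's kwargs and merged over TILEDB_KWARG_DEFAULTS; unknown keys stay in kwargs.
def _parse_tiledb_kwargs_example():
    kwargs = {"sparse": True, "tile": 100, "unrelated": 1}
    parsed = parse_tiledb_kwargs(kwargs)
    # parsed["sparse"] is True, parsed["tile"] is 100, and kwargs now only holds "unrelated".
    return parsed, kwargs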
class ColumnInfo:
def __init__(self, dtype, repr: Optional[str] = None, nullable: bool = False):
self.dtype = dtype
self.repr = repr
self.nullable = nullable
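# Hedged example (illustrative only, never called): ColumnInfo just bundles the resolved
# numpy dtype, an optional pandas repr used when reading back, and a nullability flag.
def _column_info_example():
    info = ColumnInfo(np.int64, repr="Int64", nullable=True)
    return info.dtype, info.repr, info.nullable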
def dtype_from_column(col):
import pandas as pd
col_dtype = col.dtype
if col_dtype in (
np.int32,
np.int64,
np.uint32,
np.uint64,
np.float,
np.double,
np.uint8,
):
return ColumnInfo(col_dtype)
if isinstance(
col_dtype,
(
pd.Int64Dtype,
pd.Int32Dtype,
pd.Int16Dtype,
pd.Int8Dtype,
pd.UInt64Dtype,
pd.UInt32Dtype,
pd.UInt16Dtype,
pd.UInt8Dtype,
),
):
return ColumnInfo(col_dtype.numpy_dtype, repr=str(col_dtype), nullable=True)
if isinstance(col_dtype, pd.BooleanDtype):
return ColumnInfo(np.uint8, repr=pd.BooleanDtype(), nullable=True)
# TODO this seems kind of brittle
if col_dtype.base == np.dtype("M8[ns]"):
if col_dtype == np.dtype("datetime64[ns]"):
return ColumnInfo(col_dtype)
elif hasattr(col_dtype, "tz"):
raise ValueError("datetime with tz not yet supported")
else:
raise ValueError(
"unsupported datetime subtype ({})".format(type(col_dtype))
)
# Pandas 1.0 has StringDtype extension type
if col_dtype.name == "string":
return ColumnInfo(unicode_dtype)
if col_dtype == "bool":
return ColumnInfo(np.uint8, repr=np.dtype("bool"))
if col_dtype == np.dtype("O"):
# Note: this does a full scan of the column... not sure what else to do here
# because Pandas allows mixed string column types (and actually has
# problems w/ allowing non-string types in object columns)
inferred_dtype = pd.api.types.infer_dtype(col)
if inferred_dtype == "bytes":
return ColumnInfo(np.bytes_)
elif inferred_dtype == "string":
# TODO we need to make sure this is actually convertible
return ColumnInfo(unicode_dtype)
elif inferred_dtype == "mixed":
raise ValueError(
"Column '{}' has mixed value dtype and cannot yet be stored as a TileDB attribute".format(
col.name
)
)
raise ValueError("Unhandled column type: '{}'".format(col_dtype))
# TODO make this a staticmethod on Attr?
def attrs_from_df(df, index_dims=None, filters=None, column_types=None, ctx=None):
attr_reprs = dict()
if ctx is None:
ctx = tiledb.default_ctx()
if column_types is None:
column_types = dict()
attrs = list()
for name, col in df.items():
if isinstance(filters, dict):
if name in filters:
attr_filters = filters[name]
else:
attr_filters = None
elif filters is not None:
attr_filters = filters
else:
attr_filters = tiledb.FilterList([tiledb.ZstdFilter(1, ctx=ctx)])
# ignore any column used as a dim/index
if index_dims and name in index_dims:
continue
if name in column_types:
spec_type = column_types[name]
# Handle ExtensionDtype
if hasattr(spec_type, "type"):
spec_type = spec_type.type
attr_info = ColumnInfo(spec_type)
else:
attr_info = dtype_from_column(col)
attrs.append(
tiledb.Attr(
name=name,
dtype=attr_info.dtype,
filters=attr_filters,
nullable=attr_info.nullable,
)
)
if attr_info.repr is not None:
attr_reprs[name] = attr_info.repr
return attrs, attr_reprs
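# Illustrative usage sketch (hypothetical example, not part of the original module):
# building TileDB attributes for a small DataFrame; requires a working tiledb install
# since attrs_from_df() creates a default context and Zstd filter list.
def _example_attrs_from_df():
    import pandas as pd
    df = pd.DataFrame({"price": [1.5, 2.5], "count": [3, 4]})
    attrs, attr_reprs = attrs_from_df(df)
    return [a.name for a in attrs], attr_reprs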
def dim_info_for_column(ctx, df, col, tile=None, full_domain=False, index_dtype=None):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if len(col_values) < 1:
raise ValueError(
"Empty column '{}' cannot be used for dimension!".format(col_name)
)
if index_dtype is not None:
dim_info = ColumnInfo(index_dtype)
elif col_values.dtype is np.dtype("O"):
col_val0_type = type(col_values[0])
if col_val0_type in (bytes, unicode_type):
# TODO... core only supports TILEDB_ASCII right now
dim_info = ColumnInfo(np.bytes_)
else:
raise TypeError(
"Unknown column type not yet supported ('{}')".format(col_val0_type)
)
else:
dim_info = dtype_from_column(col_values)
return dim_info
def dim_for_column(
ctx, name, dim_info, col, tile=None, full_domain=False, ndim=None, dim_filters=None
):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if tile is None:
if ndim is None:
raise TileDBError("Unexpected Nonetype ndim")
if ndim == 1:
tile = 10000
elif ndim == 2:
tile = 1000
elif ndim == 3:
tile = 100
else:
tile = 10
dtype = dim_info.dtype
if full_domain:
if not dim_info.dtype in (np.bytes_, np.unicode_):
# Use the full type domain, deferring to the constructor
(dtype_min, dtype_max) = tiledb.libtiledb.dtype_range(dim_info.dtype)
dim_max = dtype_max
if dtype.kind == "M":
date_unit = np.datetime_data(dtype)  # api: numpy.datetime_data
from timeit import default_timer as timer
import subprocess
import numpy as np
import sys
'''
usage: python performance.py <luthien_location> <luthien_location2> <n_tests>
'''
n_tests = 10
luthien2 = sys.argv[2]
luthien1 = sys.argv[1]
if len(sys.argv) >= 4:
n_tests = int(sys.argv[3])
big_single = "big/SRR941557.fastq"
big_single_out = "output/SRR941557.fastq"
big_pair_1 = "big/SRR3309317_1.mini.fastq"
big_pair_2 = "big/SRR3309317_2.mini.fastq"
big_pair_1_out = "output/SRR3309317_1.mini.fastq"
big_pair_2_out = "output/SRR3309317_2.mini.fastq"
small_single = "test.fastq"
small_single_out = "output/test.fastq"
small_pair_1 = "test.f.fastq"
small_pair_2 = "test.r.fastq"
small_pair_1_out = "output/test.f.fastq"
small_pair_2_out = "output/test.r.fastq"
def runCommand(cmd):
#print("\t> " + cmd)
process = subprocess.call(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#process = subprocess.call(cmd, shell=True)
return process
def run_luthien(threads, chunk_size, luthien):
#print("Running test for " + str(threads) + " " + str(chunk_size))
durations_single = []
durations_paired = []
code = runCommand(" ".join([luthien, "-i1", big_pair_1, "-o1", big_pair_1_out,
"-i2", big_pair_2, "-o2", big_pair_2_out,
"-t", str(threads), "-c", str(chunk_size)]))
for i in range(n_tests):
start = timer()
code = runCommand(" ".join([luthien, "-i1", big_pair_1, "-o1", big_pair_1_out,
"-i2", big_pair_2, "-o2", big_pair_2_out,
"-t", str(threads), "-c", str(chunk_size)]))
if(code != 0):
quit()
end = timer()
durations_paired.append(end-start)
code = runCommand(" ".join([luthien, "-i1", big_single, "-o1", big_single_out,
"-t", str(threads), "-c", str(chunk_size)]))
for i in range(n_tests):
start = timer()
code = runCommand(" ".join([luthien, "-i1", big_single, "-o1", big_single_out,
"-t", str(threads), "-c", str(chunk_size)]))
end = timer()
if(code != 0):
quit()
durations_single.append(end-start)
return np.array(durations_single), np.array(durations_paired)
def run_fastp(threads):
duration_single = 0.0
duration_paired = 0.0
cmd_single = " ".join(["~/miniconda3/bin/fastp --in1", big_single, "-o", big_single_out,
"-w", str(threads), "-A"])
cmd_paired = " ".join(["~/miniconda3/bin/fastp --in1", big_pair_1, "-o", big_pair_1_out,
"--in2", big_pair_2, "--out2", big_pair_2_out,
"-w", str(threads), "-A"])
code = runCommand(cmd_single)
for i in range(n_tests):
start = timer()
code = runCommand(cmd_single)
end = timer()
if(code != 0):
quit()
duration_single += (end-start)
code = runCommand(cmd_paired)
for i in range(n_tests):
start = timer()
code = runCommand(cmd_paired)
end = timer()
if(code != 0):
quit()
duration_paired += (end-start)
return duration_single / n_tests, duration_paired / n_tests
start_threads = 4
step_threads = 1
end_threads = 4
threads_list = np.arange(start_threads, end_threads+step_threads, step_threads)
#chunks_list = np.arange(start_chunk, end_chunk+step_chunk, step_chunk)
for thread_number in threads_list:
#fastp_single, fastp_paired = run_fastp(thread_number)
#print("-t " + str(4) + " -A: " + str(fastp_single) + "s, " + str(fastp_paired) + "s")
single_time, paired_time = run_luthien(thread_number, 12, luthien2)
print(luthien2 + ": -t " + str(thread_number) + ": "
+ str(np.mean(single_time))+"+/-"+str(np.std(single_time)) +" single, "
+ str(np.mean(paired_time))+"+/-"+str(np.std(paired_time)) +" paired.")
single_time, paired_time = run_luthien(thread_number, 12, luthien1)
print(luthien1 + ": -t " + str(thread_number) + ": "
+ str(np.mean(single_time))+"+/-"+str(np.std(single_time)) +" single, "
+ str(np.mean(paired_time))+"+/-"+str(np.std(paired_time)) +" paired.")  # api: numpy.std
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utility classes."""
import logging
import math
import os
import sys
import time
import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet.gluon import rnn, contrib
from .data import ParserVocabulary
from .tarjan import Tarjan
class Progbar(object):
"""Progbar class copied from keras (https://github.com/fchollet/keras/)
Displays a progress bar.
Small edit : added strict arg to update
Parameters
----------
target : int
Total number of steps expected.
width : int
Progress bar width.
verbose : int
Verbosity level. Options are 1 and 2.
"""
def __init__(self, target, width=30, verbose=1):
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=None, exact=None, strict=None):
"""
Updates the progress bar.
Parameters
----------
current : int
Index of current step.
values : List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
exact : List of tuples (name, value_for_last_step).
The progress bar will display these values directly.
"""
values = values or []
exact = exact or []
strict = strict or []
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
for cells in exact:
k, v, w = cells[0], cells[1], 4
if len(cells) == 3:
w = cells[2]
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = [v, 1, w]
for k, v in strict:
if k not in self.sum_values:
self.unique_values.append(k)
self.sum_values[k] = v
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
prev_total_width = self.total_width
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
numdigits = 0 if self.target == 0 or math.isnan(self.target) \
else int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = 0 if self.target == 0 else float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit * (self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
if isinstance(self.sum_values[k], list):
info += (' - %s: %.' + str(self.sum_values[k][2]) + 'f') % (
k, self.sum_values[k][0] / max(1, self.sum_values[k][1]))
else:
info += ' - %s: %s' % (k, self.sum_values[k])
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width - self.total_width) * ' ')
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write('\n')
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s: %.4f' % (k,
self.sum_values[k][0] / max(1, self.sum_values[k][1]))
sys.stdout.write(info + '\n')
def add(self, n, values=None):
values = values or []
self.update(self.seen_so_far + n, values)
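# Illustrative usage sketch (hypothetical example, not part of the original module):
# a typical Progbar loop, updating the bar once per step with a running loss value.
def _example_progbar(num_steps=100):
    bar = Progbar(target=num_steps)
    for step in range(1, num_steps + 1):
        loss = 1.0 / step  # dummy value for the example
        bar.update(step, values=[('loss', loss)])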
def mxnet_prefer_gpu():
"""If gpu available return gpu, else cpu
Returns
-------
context : Context
The preferable GPU context.
"""
gpu = int(os.environ.get('MXNET_GPU', default=0))
if gpu in mx.test_utils.list_gpus():
return mx.gpu(gpu)
return mx.cpu()
def init_logger(root_dir, name='train.log'):
"""Initialize a logger
Parameters
----------
root_dir : str
directory for saving log
name : str
name of logger
Returns
-------
logger : logging.Logger
a logger
"""
os.makedirs(root_dir, exist_ok=True)
log_formatter = logging.Formatter('%(message)s')
logger = logging.getLogger(name)
file_handler = logging.FileHandler('{0}/{1}'.format(root_dir, name), mode='w')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
return logger
def orthonormal_VanillaLSTMBuilder(lstm_layers, input_dims, lstm_hiddens,
dropout_h=0., debug=False):
"""Build a standard LSTM cell, with variational dropout,
with weights initialized to be orthonormal (https://arxiv.org/abs/1312.6120)
Parameters
----------
lstm_layers : int
Currently only support one layer
input_dims : int
word vector dimensions
lstm_hiddens : int
hidden size
dropout_h : float
dropout on hidden states
debug : bool
set to True to skip orthonormal initialization
Returns
-------
lstm_cell : VariationalDropoutCell
A LSTM cell
"""
assert lstm_layers == 1, 'only accept one layer lstm'
W = orthonormal_initializer(lstm_hiddens, lstm_hiddens + input_dims, debug)
W_h, W_x = W[:, :lstm_hiddens], W[:, lstm_hiddens:]
b = nd.zeros((4 * lstm_hiddens,))
b[lstm_hiddens:2 * lstm_hiddens] = -1.0
lstm_cell = rnn.LSTMCell(input_size=input_dims, hidden_size=lstm_hiddens,
i2h_weight_initializer=mx.init.Constant(np.concatenate([W_x] * 4, 0)),
h2h_weight_initializer=mx.init.Constant(np.concatenate([W_h] * 4, 0)),
h2h_bias_initializer=mx.init.Constant(b))
wrapper = contrib.rnn.VariationalDropoutCell(lstm_cell, drop_states=dropout_h)
return wrapper
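# Illustrative usage sketch (hypothetical example, not part of the original module):
# build the cell with debug=True (skipping the orthonormal init), initialize its
# parameters and unroll it over a dummy (seq_len, batch_size, input_dims) sequence.
def _example_orthonormal_lstm(seq_len=8, batch_size=4, input_dims=16, hiddens=32):
    cell = orthonormal_VanillaLSTMBuilder(1, input_dims, hiddens, dropout_h=0.2, debug=True)
    cell.initialize()
    inputs = nd.random.normal(shape=(seq_len, batch_size, input_dims))
    outputs, _ = cell.unroll(length=seq_len, inputs=inputs, layout='TNC', merge_outputs=True)
    return outputs.shape  # expected: (seq_len, batch_size, hiddens)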
def biLSTM(f_lstm, b_lstm, inputs, dropout_x=0.):
"""Feature extraction through BiLSTM
Parameters
----------
f_lstm : VariationalDropoutCell
Forward cell
b_lstm : VariationalDropoutCell
Backward cell
inputs : NDArray
seq_len x batch_size
dropout_x : float
Variational dropout on inputs
Returns
-------
outputs : NDArray
Outputs of BiLSTM layers, seq_len x 2 hidden_dims x batch_size
"""
for f, b in zip(f_lstm, b_lstm):
inputs = nd.Dropout(inputs, dropout_x, axes=[0]) # important for variational dropout
fo, _ = f.unroll(length=inputs.shape[0], inputs=inputs, layout='TNC', merge_outputs=True)
bo, _ = b.unroll(length=inputs.shape[0], inputs=inputs.flip(axis=0), layout='TNC',
merge_outputs=True)
f.reset()
b.reset()
inputs = nd.concat(fo, bo.flip(axis=0), dim=2)
return inputs
def leaky_relu(x):
"""slope=0.1 leaky ReLu
Parameters
----------
x : NDArray
Input
Returns
-------
y : NDArray
y = x > 0 ? x : 0.1 * x
"""
return nd.LeakyReLU(x, slope=.1)
def bilinear(x, W, y, input_size, seq_len, batch_size, num_outputs=1, bias_x=False, bias_y=False):
"""Do xWy
Parameters
----------
x : NDArray
(input_size x seq_len) x batch_size
W : NDArray
(num_outputs x ny) x nx
y : NDArray
(input_size x seq_len) x batch_size
input_size : int
input dimension
seq_len : int
sequence length
batch_size : int
batch size
num_outputs : int
number of outputs
bias_x : bool
whether concat bias vector to input x
bias_y : bool
whether concat bias vector to input y
Returns
-------
output : NDArray
[seq_len_y x seq_len_x if output_size == 1 else seq_len_y x num_outputs x seq_len_x]
x batch_size
"""
if bias_x:
x = nd.concat(x, nd.ones((1, seq_len, batch_size)), dim=0)
if bias_y:
y = nd.concat(y, nd.ones((1, seq_len, batch_size)), dim=0)
ny = input_size + bias_y
# W: (num_outputs x ny) x nx
lin = nd.dot(W, x)
if num_outputs > 1:
lin = reshape_fortran(lin, (ny, num_outputs * seq_len, batch_size))
y = y.transpose([2, 1, 0]) # May cause performance issues
lin = lin.transpose([2, 1, 0])
blin = nd.batch_dot(lin, y, transpose_b=True)
blin = blin.transpose([2, 1, 0])
if num_outputs > 1:
blin = reshape_fortran(blin, (seq_len, num_outputs, seq_len, batch_size))
return blin
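# Illustrative usage sketch (hypothetical example, not part of the original module):
# calling bilinear() on dummy NDArrays; shapes follow the docstring, with
# num_outputs=1 and no bias terms, so the result is seq_len x seq_len x batch_size.
def _example_bilinear(input_size=4, seq_len=5, batch_size=2):
    x = nd.random.normal(shape=(input_size, seq_len, batch_size))
    y = nd.random.normal(shape=(input_size, seq_len, batch_size))
    W = nd.random.normal(shape=(input_size, input_size))
    out = bilinear(x, W, y, input_size, seq_len, batch_size)
    return out.shape  # expected: (seq_len, seq_len, batch_size)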
def orthonormal_initializer(output_size, input_size, debug=False):
"""adopted from <NAME> https://github.com/tdozat/Parser/blob/master/lib/linalg.py
Parameters
----------
output_size : int
input_size : int
debug : bool
Whether to skip this initializer
Returns
-------
Q : np.ndarray
The orthonormal weight matrix of input_size x output_size
"""
print((output_size, input_size))
if debug:
Q = np.random.randn(input_size, output_size)  # api: numpy.random.randn
from .single_tile_affine_renderer import SingleTileAffineRenderer
import numpy as np
import tinyr
from enum import Enum
class BlendType(Enum):
NO_BLENDING = 0
AVERAGING = 1
LINEAR = 2
MULTI_BAND_SEAM = 3
class MultipleTilesAffineRenderer:
BLEND_TYPE = {
"NO_BLENDING" : 0,
"AVERAGING" : 1,
"LINEAR" : 2
}
def __init__(self, single_tiles, blend_type=BlendType.NO_BLENDING):
"""Receives a number of image paths, and for each a transformation matrix"""
self.blend_type = blend_type
self.single_tiles = single_tiles
# Create an RTree of the bounding boxes of the tiles
self.rtree = tinyr.RTree(interleaved=True, max_cap=5, min_cap=2)
for t in self.single_tiles:
bbox = t.get_bbox()
# using the (x_min, y_min, x_max, y_max) notation
self.rtree.insert(t, (bbox[0], bbox[2], bbox[1], bbox[3]))
#should_compute_mask = False if self.blend_type == 0 else True
#self.single_tiles = [SingleTileAffineRenderer(img_path, img_shape[1], img_shape[0], compute_mask=should_compute_mask) for img_path, img_shape in zip(img_paths, img_shapes)]
#for i, matrix in enumerate(transform_matrices):
# self.single_tiles[i].add_transformation(matrix)
def add_transformation(self, transform_matrix):
"""Adds a transformation to all tiles"""
self.rtree = tinyr.RTree(interleaved=True, max_cap=5, min_cap=2)
for single_tile in self.single_tiles:
single_tile.add_transformation(transform_matrix)
bbox = single_tile.get_bbox()
# using the (x_min, y_min, x_max, y_max) notation
self.rtree.insert(single_tile, (bbox[0], bbox[2], bbox[1], bbox[3]))
def render(self):
if len(self.single_tiles) == 0:
return None, None
# Render all tiles by finding the bounding box, and using crop
all_bboxes = np.array([t.get_bbox() for t in self.single_tiles]).T
bbox = [np.min(all_bboxes[0])  # api: numpy.min
"""detectlines includes tasks and tools for handling 1-d spectra
"""
import numpy as np
from PySpectrograph import SpectrographError
default_kernal = [0, -1, -2, -3, -2, -1, 0, 1, 2, 3, 2, 1, 0]
def centroid(xarr, yarr, kern=default_kernal, mask=None, mode='same'):
"""Find the centroid of a line following a similar algorithm as
the center1d algorithm in IRAF. xarr and yarr should be an area
around the desired feature to be centroided. The default kernel
is used if the user does not specify one.
The algorithm solves for the solution to the equation
.. math:: \int (I-I_0) f(x-x_0) dx = 0
returns xc
"""
if len(yarr) < len(kern):
raise SpectrographError('Array has to be larger than the kernel')
if mask is not None:
# catch the case where, near the edges, the mask covers fewer points than the kernel
if mask.sum() < len(default_kernal):
warr = np.convolve(yarr, kern, mode=mode)
xc = np.interp(0, warr[mask], xarr[mask])
return xc
else:
yarr = yarr[mask]
xarr = xarr[mask]
# convolve the input array with the given kernel
warr = np.convolve(yarr, kern, mode=mode)  # api: numpy.convolve
from fides import (
Optimizer, BFGS, SR1, DFP, BB, BG, Broyden, GNSBFGS, HybridFixed,
FX, SSM, TSSM, SubSpaceDim, StepBackStrategy
)
import numpy as np
import logging
import pytest
import fides
import time
def rosen(x):
f = 100 * (x[1] - x[0] ** 2) ** 2 + (1 - x[0]) ** 2
return f
def rosengrad(x):
f = rosen(x)
g = np.array([-400 * (x[1] - x[0] ** 2) * x[0] - 2 * (1 - x[0]),
200 * (x[1] - x[0] ** 2)])
return f, g
def rosenboth(x):
f, g = rosengrad(x)
h = np.array([[1200 * x[0] ** 2 - 400 * x[1] + 2, -400 * x[0]],
[-400 * x[0], 200]])
return f, g, h
def fletcher(x):
res = np.array([np.sqrt(10.2)*x[0],
np.sqrt(10.8)*x[1],
4.6-x[0]**2,
4.9-x[1]**2])
sres = np.array([[np.sqrt(10.2), 0],
[0, np.sqrt(10.8)],
[-2*x[0], 0],
[0, -2*x[1]]])
return res, sres
def rosenrandomfail(x):
f, g, h = rosenboth(x)
p = 1/4 # elementwise probability for nan
if np.random.choice(a=[True, False], p=[p, 1-p])  # api: numpy.random.choice
import brancher.config as cfg
cfg.set_device('gpu')
print(cfg.device)
import numpy as np
import matplotlib.pyplot as plt
from brancher.variables import RootVariable, ProbabilisticModel
from brancher.standard_variables import NormalVariable, DeterministicVariable, CategoricalVariable, EmpiricalVariable, RandomIndices
from brancher import inference
import brancher.functions as BF
from brancher.particle_inference_tools import VoronoiSet
from brancher import inference
from brancher.inference import WassersteinVariationalGradientDescent as WVGD
#import brancher.config as cfg
#cfg.set_device('gpu')
#print(cfg.device)
# Data
import torchvision
ELBO_list = []
num_iterations = 25
for iteration in range(num_iterations):
# Data
image_size = 28
num_classes = 10
train = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=None)
test = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=None)
dataset_size = 500
indices = np.random.choice(range(len(train)), dataset_size)
test_indices = np.random.choice(range(len(test)), dataset_size)
particles_ELBO = []
for num_particles in [3, 1]:
input_variable = np.reshape(train.train_data.numpy()[indices, :], newshape=(dataset_size, 1, image_size, image_size))
output_labels = train.train_labels.numpy()[indices]
# Test set data
input_variable_test = np.reshape(test.test_data.numpy()[test_indices, :],
newshape=(dataset_size, 1, image_size, image_size))
output_labels_test = test.test_labels.numpy()[test_indices]
# Data sampling model
minibatch_size = 30
minibatch_indices = RandomIndices(dataset_size=dataset_size, batch_size=minibatch_size,
name="indices", is_observed=True)
x = EmpiricalVariable(input_variable, indices=minibatch_indices,
name="x", is_observed=True)
labels = EmpiricalVariable(output_labels, indices=minibatch_indices,
name="labels", is_observed=True)
# Test data
x_test = EmpiricalVariable(input_variable_test, indices=minibatch_indices,
name="x", is_observed=True)
labels_test = EmpiricalVariable(output_labels_test, indices=minibatch_indices,
name="labels", is_observed=True)
# Forward pass
in_channels = 1
out_channels = 10
image_size = 28
Wk = NormalVariable(loc=np.zeros((out_channels, in_channels, 2, 2)),
scale=10*np.ones((out_channels, in_channels, 2, 2)),
name="Wk")
z = DeterministicVariable(BF.mean(BF.relu(BF.conv2d(x, Wk, stride=1)), (2, 3)), name="z")
Wl = NormalVariable(loc=np.zeros((num_classes, out_channels)),
scale=10*np.ones((num_classes, out_channels)),
name="Wl")
b = NormalVariable(loc=np.zeros((num_classes, 1))  # api: numpy.zeros
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 26 18:07:47 2021
@author: <NAME>
Cestimii.
Compute curvature estimations on point clouds using integral invariants.
This file contains all methods that have something to do with the
initialization, construction, splitting or merging of occupancy grids or
occupancy grids of boundary boxes. Both on strict and relaxed occupancy grids.
"""
import time
import warnings
import numpy as np
import scipy.ndimage as nd
#these three imports are only necessary for imgstack method
from PIL import Image
import h5py
import os
import cestimii.geomshapes as gs
def constructoccgrid_sphere(r=6, samp=0):
"""
Constructs a (strict) occupancy grid of a sphere of radius 'r'.
Input is a parameter 'r', which specifies the radius of the sphere. If r
is negative, an alternative, more accurate, but less efficient way to
generate a sphere of radius -r is used.
While the "samp" parameter, which specifies the amount of points used to
create the sphere's point cloud, often yields good results for 360*5 for
a wide variety of scales, setting samp=0, uses a formula to dynamically
adapt the sample rate to the radius to ensure the sphere's occupancy grid
is always initialized correctly.
Returns a matrix 'OGS', which represents the Occupancy Grid
of the Sphere with radius 'r'. OGS has dimensions (2*r+1)x(2*r+1)x(2*r+1).
The default sphere radius 'r' is 6.
Formerly known as "createsphereoccgrid()".
"""
if samp==0:
samp=360*r/2
if (r == 0.5) | (r == 1.5) | (r < 0 ):
if r == 0.5:
OGB = np.ones((1,1,1), int)
return OGB
if r == 1.5:
OGB = np.ones((3,3,3), int)
OGB[0,0,0] = 0
OGB[0,0,2] = 0
OGB[0,2,0] = 0
OGB[0,2,2] = 0
OGB[2,0,0] = 0
OGB[2,0,2] = 0
OGB[2,2,0] = 0
OGB[2,2,2] = 0
OGB[1,1,1] = 0
return OGB
if r < 1: #this kernel might be more precise
radius = -r
W = 2*radius +1
center=(radius, radius, radius)
a=[]
b=[]
c=[]
OGB=np.zeros((W,W,W), np.int8)
for z in range(W):
for y in range(W):
for x in range(W):
val=(x-center[2])**2+(y-center[1])**2+(z-center[0])**2
if val==radius**2:
a.append(x)
b.append(y)
c.append(z)
OGB[z,y,x]=1
return OGB
elif type(r)!=int:
raise NameError('Radius has to be an integer.')
else:
#load sphere pointcloud
x,y,z = gs.sphere(r=100, c=[0,0,0],
samp=samp)
x = x.flatten()
y = y.flatten()
z = z.flatten()
#find min,max values of each axis
xmin =np.min(x)
xmax =np.max(x)
ymin =np.min(y)
ymax =np.max(y)
zmin =np.min(z)
zmax =np.max(z)
#normalise the axes, i.e. rescale the object (!)
xn = ((x-xmin)/(xmax-xmin))
yn = ((y-ymin)/(ymax-ymin))
zn = ((z-zmin)/(zmax-zmin))
#init grid over whole ball domain
OGS = np.zeros([2*r+1,2*r+1,2*r+1], int) #see sketch on rmkbl tablet
#populate ballgrid boundary
OGS[(np.round(xn * 2*r)).astype(int),
(np.round(yn * 2*r)).astype(int),
(np.round(zn * 2*r)).astype(int)] = 1
return OGS
def constructoccgrid_ball(r=6, samp=0):
"""
Constructs a (strict) occupancy grid of a ball of radius 'r'.
Input is a parameter 'r', which specifies the radius of the ball. If r
is negative, an alternative, more accurate, but less efficient way to
generate a sphere of radius -r is used. As the kernel is quite inaccurate
for small values of r, I hardcoded two small kernels for r=1 and r=1.5,
whereby the latter should serve as a better kernel for r=2.
While the "samp" parameter, which specifies the amount of points used to
create the sphere's point cloud, often yields good results for 360*5 for
a wide variety of scales, setting samp=0, uses a formula to dynamically
adapt the sample rate to the radius to ensure the sphere's occupancy grid
is always initialized correctly.
Returns a matrix 'OGB', which represent the Occupancy Grid
of the Ball with radius 'r'. OGB has dimensions (2*r+1)x(2*r+1)x(2*r+1).
The default ball radius 'r' is 6.
Formerly known as "createballoccgrid()".
"""
if samp==0:
samp=360*r//2
if (r == 0.5) | (r == 1.5) | (r < 0 ):
if r == 0.5:
OGB = np.ones((1,1,1), int)
return OGB
if r == 1.5:
OGB = np.ones((3,3,3), int)
OGB[0,0,0] = 0
OGB[0,0,2] = 0
OGB[0,2,0] = 0
OGB[0,2,2] = 0
OGB[2,0,0] = 0
OGB[2,0,2] = 0
OGB[2,2,0] = 0
OGB[2,2,2] = 0
return OGB
if r < 1: #more accurate, but less efficient.
radius = -r
W = 2*radius +1
center=(radius, radius, radius)
a=[]
b=[]
c=[]
OGB=np.zeros((W,W,W), np.int8)
for z in range(W):
for y in range(W):
for x in range(W):
val=(x-center[2])**2+(y-center[1])**2+(z-center[0])**2
if val<=radius**2:
a.append(x)
b.append(y)
c.append(z)
OGB[z,y,x]=1
return OGB
elif type(r)!=int:
raise NameError('Radius has to be an integer.')
else:
#load pointcloud
x,y,z = gs.sphere(r=100, c=[0,0,0],
samp=samp)
x = x.flatten()
y = y.flatten()
z = z.flatten()
#find min,max values of each axis
xmin =np.min(x)
xmax =np.max(x)
ymin =np.min(y)
ymax =np.max(y)
zmin =np.min(z)
zmax =np.max(z)
#normalise the axes, i.e. rescale the object (!)
xn = ((x-xmin)/(xmax-xmin))
yn = ((y-ymin)/(ymax-ymin))
zn = ((z-zmin)/(zmax-zmin))
#init grid over whole ball domain
OGB = np.zeros([2*r+1,2*r+1,2*r+1], int)
#populate ballgrid boundary
OGB[(np.round(xn * 2*r)).astype(int),
(np.round(yn * 2*r)).astype(int),
(np.round(zn * 2*r)).astype(int)] = 1
#fill ballgrid
OGB = nd.binary_fill_holes(OGB).astype(int)
return OGB
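# Illustrative usage sketch (hypothetical example, not part of the original module):
# a small ball kernel; per the docstring the result is a (2*r+1)^3 strict occupancy grid.
def _example_constructoccgrid_ball(r=3):
    OGB = constructoccgrid_ball(r=r)
    assert OGB.shape == (2 * r + 1, 2 * r + 1, 2 * r + 1)
    return OGB.sum()  # number of occupied cells inside the ball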
def constructoccgrid_ballzone(kr, alpha):
"""
Constructs a (strict) occupancy grid of a ballzone of width 'alpha', i.e.
difference of a ball of radius 'r' and a ball of radius 'r-alpha'.
Input:
-'r': specifies the radius of the ball
-'alpha': is used for the inner circle of the zonal ball area, i.e.
the inner circle is of radius r-alpha
-'r', which specifies the radius of the ball. If r
is negative, an alternative, more accurate, but less efficient way to
generate a sphere of radius -r is used. As the kernel is quite
inaccurate for small values of r, I hardcoded two small kernels for
r=1 and r=1.5, whereby the latter should serve as a better kernel for
r=2.
Returns a matrix 'OGBZ', which represent the Occupancy Grid
of the ballzone. I.e. we return the difference between the ball kernel of
size r and the ball kernel of size r-alpha. This ballzone is alpha wide.
"""
#get occgrid of ball with radius r
rball = constructoccgrid_ball(r=kr) #dims (2xkr+1)**3
#get occgrid of ball with radius r
ralphaball = constructoccgrid_ball(r=kr-alpha) #(2x(kr-alpha) +1)**3
#pad occgrid ralphaball to the size of the occgrid rball
ralphaball = np.pad(ralphaball,((alpha, alpha),(alpha, alpha),
(alpha, alpha)), 'constant')
OGBZ = rball-ralphaball
return OGBZ
def constructoccgrid_pointcloud(inp, rho, ocg="str", taulow=0, fill=1):
"""
Construct a (strict or relaxed) occupancy grid of arbitrary pointclouds.
Input:
-'inp': inp can be a .xyz pointcloud in the format x y z, OR a list or
array in the format of z y x.
-'rho': controls the amount of cells in the occupancy grid.
-'ocg' specifies whether a strict or relaxed occupancy grid is used in
the method itself. Use "str" for strict occgrid, "rlx" for relaxed.
-'taulow' is the threshold for the values in the relaxed occgrid. If a
value is >taulow it is used in the relaxed occgrid and ignored
otherwise. This exists to limit the error that can happen due to the
usage of a relaxed occupancy grid.
-'fill': controls whether the occupancy grid will be filled or not.
'fill'==1 means it is filled (using
ndimage.binary_fill_holes() ).
Due to the resizing with rho, the output occupancy grid might be of
lower resolution than the actual dataset.
If the occupancy grid is not filled correctly, try reducing 'rho' or use
a point cloud that is sampled more densely. A relaxed occgrid will not
be filled.
Returns a matrix 'OGD', which represents the Occupancy Grid of the
pointcloud over the whole volume Domain.
"""
if type(inp)==str:
#load pointcloud
if ocg=="str":
x, y, z = np.loadtxt(inp,skiprows=1, unpack=True,
usecols=(0,1,2))
else:
x, y, z, vals = np.loadtxt(inp,skiprows=1, unpack=True,
usecols=(0,1,2,3))
elif isinstance(inp,np.ndarray):
z = inp[:,0]
y = inp[:,1]
x = inp[:,2]
if ocg=="rlx":
vals = inp[:,3]
elif isinstance(inp,list):
#we assume the input list is given like [z,y,x]
inp = np.transpose(inp) # shape is now [#pts, 3] (= z y x)
#get the separate coordinate vectors
z = inp[:,0]
y = inp[:,1]
x = inp[:,2]
if ocg=="rlx":
vals = inp[:,3]
else:
raise NameError('Input can be an already loaded pointcloud that \
consists of the three coordinates z y x or a string \
that leads to the file that is in z y x format with \
no header.')
if ocg=="rlx":
#first, shift values if they are negative, then normalize vals s.t.
#max is 1, then multiply all values such that the maximum is sc
valsmin = np.min(vals)
if valsmin<0:
vals = vals-valsmin #shift to 0
valsmax = np.max(vals)
if valsmax==0:
raise NameError('Maximum Value of the point cloud is 0. Cant \
divide by zero')
vals = (vals/valsmax)
#find min,max values of each axis
xmin =np.min(x)
ymin =np.min(y)
zmin =np.min(z)
#normalise x and then rescale axes to max val
# xrs = ((x-xmin)/(xmax-xmin))*xmax
# yrs = ((y-ymin)/(ymax-ymin))*ymax
# zrs = ((z-zmin)/(zmax-zmin))*zmax
xrs = (x-xmin)
yrs = (y-ymin)
zrs = (z-zmin)
xmax = np.max(xrs)
ymax = np.max(yrs)
zmax = np.max(zrs)
if ocg=="str":
#init grid over domain
OGD = np.zeros([int(zmax*rho)+1,int(ymax*rho)+1,int(xmax*rho)+1], bool)
#populate domaingrid boundary
OGD[(zrs * rho).astype(int),
(yrs * rho).astype(int),
(xrs * rho).astype(int)] = 1
#fill domaingrid
if fill==1:
OGD = nd.binary_fill_holes(OGD).astype(int)
else:
#init grid over domain
OGD = np.zeros([int(zmax*rho)+1,int(ymax*rho)+1,int(xmax*rho)+1],
np.double)
#populate domaingrid boundary but only for values >= taulow
okvals = vals >= taulow
okindc = np.where(okvals==1)
OGD[(zrs[okindc[0]] * rho).astype(int),
(yrs[okindc[0]] * rho).astype(int),
(xrs[okindc[0]] * rho).astype(int)] = vals
return OGD
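# Illustrative usage sketch (hypothetical example, not part of the original module):
# a strict occupancy grid from a small synthetic point cloud given as an (N, 3)
# array in z y x order; fill=0 skips hole filling for this unstructured cloud.
def _example_constructoccgrid_pointcloud():
    pts = np.random.rand(1000, 3) * 10.0  # columns are z, y, x
    OGD = constructoccgrid_pointcloud(pts, rho=2, fill=0)
    return OGD.shape, OGD.sum()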
def constructpcagrids_pointcloud(inp, rho, kr=6, ocg="str", taulow=0,
variant=1, debug=0):
"""
Construct all necessary occupancy grids for the calculation of the
curvature estimation using integral invariants and PCA.
Input:
-'inp' can be an already loaded pointcloud that consists of the three
coordinates x y z or a string that leads to the file that is in x y z
format with no header.
-'rho' controls the amount of cells in the occupancy grid (=rho+1).
-'kr' is the kernel radius
-'ocg' specifies whether a strict or relaxed occupancy grid is used in
the method itself. Use "str" for strict occgrid, "rlx" for relaxed.
-'taulow' is the threshold for the values in the relaxed occgrid. If a
value is >taulow it is used in the relaxed occgrid and ignored
otherwise. This exists to limit the error that can happen due to the
usage of a relaxed occupancy grid.
-'variant': there are two variants to calculate these values, which
are equivalent, but return two different outputs.
-the debug parameter is just there to enable easier debugging, e.g.
by printing certain statements at a time.
Returns the Occupancy Grid of the Domain 'OGD', the Occupancy Grid of the
Ball neighborhoord 'OGB', and the other two Occupancy Grids necessary for
the convolution, which are OGB*(-x), here 'OGBX', and OGB*(x*x^T), here
called 'OGBXX'.
"""
if debug==1:
starttime = time.time()
if type(inp)==str:
#load pointcloud
if ocg=="str":
x, y, z = np.loadtxt(inp,skiprows=1, unpack=True,
usecols=(0,1,2))
else:
x, y, z, vals = np.loadtxt(inp,skiprows=1, unpack=True,
usecols=(0,1,2,3))
elif isinstance(inp,np.ndarray):
z = inp[:,0]
y = inp[:,1]
x = inp[:,2]
if ocg=="rlx":
vals = inp[:,3]
elif isinstance(inp,list):
#we assume the input list is given like [z,y,x]
inp = np.transpose(inp) # shape is now [#pts, 3] (= z y x)
#get the separate coordinate vectors
z = inp[:,0]
y = inp[:,1]
x = inp[:,2]
if ocg=="rlx":
vals = inp[:,3]
else:
raise NameError('Input can be an already loaded pointcloud that \
consists of the three coordinates z y x or a string \
that leads to the file that is in z y x format with \
no header.')
if debug==1:
print("Initialised the input point cloud.\n"
+ "Current Runtime: " + str(time.time() - starttime))
print("Number of Points in the pointcloud: " + str(np.shape(x)[0]))
if ocg=="str":
OGD = constructoccgrid_pointcloud([z,y,x], rho)
else:
OGD = constructoccgrid_pointcloud([z,y,x, vals], rho, ocg=ocg,
taulow=taulow, fill=0)
OGB = constructoccgrid_ball(kr)
if (np.shape(OGB)[0] %2 == 0 |
np.shape(OGB)[1] %2 == 0 |
np.shape(OGB)[2] %2 == 0):
warnings.warn("The lengths of the kernel should be uneven s.t. there\
is a proper center element.")
if variant==0:
ogbx, ogby, ogbz, ogbxx, ogbyy, ogbzz, ogbxy,\
ogbxz, ogbyz = np.zeros([9, np.shape(OGB)[0], np.shape(OGB)[1],
np.shape(OGB)[2]])
cx=np.shape(OGB)[1] // 2
cy=np.shape(OGB)[0] // 2
cz=np.shape(OGB)[2] // 2
coords = np.ones([np.shape(OGB)[0],np.shape(OGB)[1],np.shape(OGB)[2]])
xcoord = np.linspace(-cx,np.shape(OGB)[1]-1-cx,np.shape(OGB)[1])
xcoords = np.multiply(np.transpose(coords,(0,2,1)),xcoord)
ogbx = np.multiply(OGB, np.transpose(-xcoords,(0,2,1)))
ogbxx = np.multiply(OGB, np.transpose(xcoords**2,(0,2,1)))
ycoord = np.linspace(-cy,np.shape(OGB)[0]-1-cy,np.shape(OGB)[0])
ycoords = np.multiply(np.transpose(coords,(2,1,0)),ycoord)
ogby = np.multiply(OGB, np.transpose(-ycoords,(2,1,0)))
ogbyy = np.multiply(OGB, np.transpose(ycoords**2,(2,1,0)))
zcoord = np.linspace(-cz,np.shape(OGB)[2]-1-cz,np.shape(OGB)[2])
zcoords = np.multiply(np.transpose(coords,(0,1,2)),zcoord)
ogbz = np.multiply(OGB, np.transpose(-zcoords,(0,1,2)))
ogbzz = np.multiply(OGB, np.transpose(zcoords**2,(0,1,2)))
ogbxy = np.multiply(np.transpose(xcoords,(0,2,1)), -ogby)
ogbxz = np.multiply(np.transpose(xcoords,(0,2,1)), -ogbz)
ogbyz = np.multiply(np.transpose(ycoords,(2,1,0)), -ogbz)
OGBX = [ogbx, ogby, ogbz]
OGBXX = [[ogbxx, ogbxy, ogbxz],
[ogbxy, ogbyy, ogbyz],
[ogbxz, ogbyz, ogbzz]]
if debug==1:
print("Computed OGD, OGB, OGBX, OGBXX.\n"
+ "Current Runtime: " + str(time.time() - starttime))
return OGD, OGB, OGBX, OGBXX
elif variant==1:
xcoords = np.zeros([OGB.shape[0],OGB.shape[1],OGB.shape[2]])
ycoords = np.zeros([OGB.shape[0],OGB.shape[1],OGB.shape[2]])
zcoords = np.zeros([OGB.shape[0],OGB.shape[1],OGB.shape[2]])
cx=np.shape(OGB)[2] // 2
cy=np.shape(OGB)[1] // 2
cz=np.shape(OGB)[0] // 2 #this is all the same value in general
#this is rather inefficient
for z in range(np.shape(OGB)[0]):
for y in range(np.shape(OGB)[1]):
for x in range(np.shape(OGB)[2]):
xcoords[z,y,x]=x-cx
ycoords[z,y,x]=y-cy
zcoords[z,y,x]=z-cz
if debug==1:
print("Computed OGD, OGB, xcoords,ycoords,zcoords.\n"
+ "Current Runtime: " + str(time.time() - starttime))
return OGD, OGB, xcoords, ycoords, zcoords
def constructpcagrids_ocg(inpoccgrid, kr=6, variant=1, debug=0):
"""
Construct all necessary occupancy grids for the calculation of the
curvature estimation using integral invariants and PCA.
Input:
-'inp' is a strict or relaxed occupancy grid.
-'kr' is the kernel radius
-'variant': there are two variants to calculate these values, which
are equivalent, but return two different outputs.
-the debug parameter is just there to enable easier debugging, e.g.
by printing certain statements at a time.
Returns the Occupancy Grid of the Domain 'OGD', the Occupancy Grid of the
Ball neighborhoord 'OGB', and the other two Occupancy Grids necessary for
the convolution, which are OGB*(-x), here 'OGBX', and OGB*(x*x^T), here
called 'OGBXX'.
"""
if debug==1:
starttime = time.time()
OGD = inpoccgrid
OGB = constructoccgrid_ball(kr)
if (np.shape(OGB)[0] %2 == 0 |
np.shape(OGB)[1] %2 == 0 |
np.shape(OGB)[2] %2 == 0):
warnings.warn("The lengths of the kernel should be uneven s.t. there\
is a proper center element.")
if variant==0:
ogbx, ogby, ogbz, ogbxx, ogbyy, ogbzz, ogbxy,\
ogbxz, ogbyz = np.zeros([9, np.shape(OGB)[0], np.shape(OGB)[1],
np.shape(OGB)[2]])
cx=np.shape(OGB)[1] // 2
cy=np.shape(OGB)[0] // 2
cz=np.shape(OGB)[2] // 2
coords = np.ones([np.shape(OGB)[0],np.shape(OGB)[1],np.shape(OGB)[2]])
xcoord = np.linspace(-cx,np.shape(OGB)[1]-1-cx,np.shape(OGB)[1])
xcoords = np.multiply(np.transpose(coords,(0,2,1)),xcoord)
ogbx = np.multiply(OGB, np.transpose(-xcoords,(0,2,1)))
ogbxx = np.multiply(OGB, np.transpose(xcoords**2,(0,2,1)))
ycoord = np.linspace(-cy,np.shape(OGB)[0]-1-cy,np.shape(OGB)[0])
ycoords = np.multiply(np.transpose(coords,(2,1,0)),ycoord)
ogby = np.multiply(OGB, np.transpose(-ycoords,(2,1,0)))
ogbyy = np.multiply(OGB, np.transpose(ycoords**2,(2,1,0)))
zcoord = np.linspace(-cz,np.shape(OGB)[2]-1-cz,np.shape(OGB)[2])
zcoords = np.multiply(np.transpose(coords,(0,1,2)),zcoord)
ogbz = np.multiply(OGB, np.transpose(-zcoords,(0,1,2)))
ogbzz = np.multiply(OGB, np.transpose(zcoords**2,(0,1,2)))
ogbxy = np.multiply(np.transpose(xcoords,(0,2,1)), -ogby)
ogbxz = np.multiply(np.transpose(xcoords,(0,2,1)), -ogbz)
ogbyz = np.multiply(np.transpose(ycoords,(2,1,0)), -ogbz)
OGBX = [ogbx, ogby, ogbz]
OGBXX = [[ogbxx, ogbxy, ogbxz],
[ogbxy, ogbyy, ogbyz],
[ogbxz, ogbyz, ogbzz]]
if debug==1:
print("Computed OGD, OGB, OGBX, OGBXX.\n"
+ "Current Runtime: " + str(time.time() - starttime))
return OGD, OGB, OGBX, OGBXX
elif variant==1:
xcoords = np.zeros([OGB.shape[0],OGB.shape[1],OGB.shape[2]])
ycoords = np.zeros([OGB.shape[0],OGB.shape[1],OGB.shape[2]])
zcoords = np.zeros([OGB.shape[0],OGB.shape[1],OGB.shape[2]])
cx=np.shape(OGB)[2] // 2
cy=np.shape(OGB)[1] // 2
cz=np.shape(OGB)[0] // 2 #this is all the same value in general
#this is rather inefficient
for z in range(np.shape(OGB)[0]):
for y in range(np.shape(OGB)[0]):
for x in range(np.shape(OGB)[0]):
xcoords[z,y,x]=x-cx
ycoords[z,y,x]=y-cy
zcoords[z,y,x]=z-cz
if debug==1:
print("Computed OGD, OGB, xcoords,ycoords,zcoords.\n"
+ "Current Runtime: " + str(time.time() - starttime))
return OGD, OGB, xcoords, ycoords, zcoords
def constructpcagrids_ms_pointcloud(inp, kr, rho, startscale, scaledist,
ocg="str", taulow=0, variant=1,
debug=0):
"""
Construct all necessary occupancy grids for the calculation of the
multiscale curvature estimation using integral invariants and PCA.
The main difference is the splitting of the kernel into separate parts.
Input:
-'inp' can be an already loaded pointcloud that consists of the three
coordinates x y z or a string that leads to the file that is in x y z
format with no header.
-'rho' controls the amount of cells in the occupancy grid.
-'kr' is the kernel radius
-'ocg' specifies whether a strict or relaxed occupancy grid is used in
the method itself. Use "str" for strict occgrid, "rlx" for relaxed.
-'taulow' is the threshold for the values in the relaxed occgrid. If a
value is >taulow it is used in the relaxed occgrid and ignored
otherwise. This exists to limit the error that can happen due to the
usage of a relaxed occupancy grid.
-'variant': there are two variants to calculate these values, which
are equivalent, but return two different outputs.
-the debug parameter is just there to enable easier debugging, e.g.
by printing certain statements at a time.
Returns the Occupancy Grid of the Domain 'OGD', the Occupancy Grid of the
Ball neighborhoord 'OGB', and the other two Occupancy Grids necessary for
the convolution, which are OGB*(-x), here 'OGBX', and OGB*(x*x^T), here
called 'OGBXX'.
"""
if debug==1:
starttime = time.time()
if type(inp)==str:
#load pointcloud
if ocg=="str":
x, y, z = np.loadtxt(inp,skiprows=1, unpack=True,
usecols=(0,1,2))
else:
x, y, z, vals = np.loadtxt(inp,skiprows=1, unpack=True,
usecols=(0,1,2,3))
elif isinstance(inp,np.ndarray):
z = inp[:,0]
y = inp[:,1]
x = inp[:,2]
if ocg=="rlx":
vals = inp[:,3]
elif isinstance(inp,list):
#we assume the input list is given like [z,y,x]
inp = np.transpose(inp) # shape is now [#pts, 3] (= z y x)
#get the separate coordinate vectors
z = inp[:,0]
y = inp[:,1]
x = inp[:,2]
if ocg=="rlx":
vals = inp[:,3]
else:
raise NameError('Input can be an already loaded pointcloud that \
consists of the three coordinates z y x or a string \
that leads to the file that is in z y x format with \
no header.')
if debug==1:
print("Initialised the input point cloud.\n"
+ "Current Runtime: " + str(time.time() - starttime))
print("Number of Points in the pointcloud: " + str(np.shape(x)[0]))
if ocg=="str":
OGD = constructoccgrid_pointcloud([z,y,x], rho)
else:
OGD = constructoccgrid_pointcloud([z,y,x, vals], rho, ocg=ocg,
taulow=taulow)
if kr==startscale:
OGB = constructoccgrid_ball(kr)
else:
OGB = constructoccgrid_ballzone(kr=kr, alpha=scaledist)
if (np.shape(OGB)[0] %2 == 0 |
np.shape(OGB)[1] %2 == 0 |
np.shape(OGB)[2] %2 == 0):
warnings.warn("The lengths of the kernel should be uneven s.t. there\
is a proper center element.")
if variant==0:
ogbx, ogby, ogbz, ogbxx, ogbyy, ogbzz, ogbxy,\
ogbxz, ogbyz = np.zeros([9, np.shape(OGB)[0], np.shape(OGB)[1],
np.shape(OGB)[2]])
cx=np.shape(OGB)[1] // 2
cy=np.shape(OGB)[0] // 2
cz=np.shape(OGB)[2] // 2
coords = np.ones([np.shape(OGB)[0],np.shape(OGB)[1],np.shape(OGB)[2]])
xcoord = np.linspace(-cx,np.shape(OGB)[1]-1-cx,np.shape(OGB)[1])
xcoords = np.multiply(np.transpose(coords,(0,2,1)),xcoord)
ogbx = np.multiply(OGB, np.transpose(-xcoords,(0,2,1)))
ogbxx = np.multiply(OGB, np.transpose(xcoords**2,(0,2,1)))
ycoord = np.linspace(-cy,np.shape(OGB)[0]-1-cy,np.shape(OGB)[0])
ycoords = np.multiply(np.transpose(coords,(2,1,0)),ycoord)
ogby = np.multiply(OGB, np.transpose(-ycoords,(2,1,0)))
ogbyy = np.multiply(OGB, np.transpose(ycoords**2,(2,1,0)))
zcoord = np.linspace(-cz,np.shape(OGB)[2]-1-cz,np.shape(OGB)[2])
zcoords = np.multiply(np.transpose(coords,(0,1,2)),zcoord)
ogbz = np.multiply(OGB, np.transpose(-zcoords,(0,1,2)))
ogbzz = np.multiply(OGB, np.transpose(zcoords**2,(0,1,2)))
ogbxy = np.multiply(np.transpose(xcoords,(0,2,1)), -ogby)
ogbxz = np.multiply(np.transpose(xcoords,(0,2,1)), -ogbz)
ogbyz = np.multiply(np.transpose(ycoords,(2,1,0)), -ogbz)
OGBX = [ogbx, ogby, ogbz]
OGBXX = [[ogbxx, ogbxy, ogbxz],
[ogbxy, ogbyy, ogbyz],
[ogbxz, ogbyz, ogbzz]]
if debug==1:
print("Computed OGD, OGB, OGBX, OGBXX.\n"
+ "Current Runtime: " + str(time.time() - starttime))
return OGD, OGB, OGBX, OGBXX
elif variant==1:
xcoords = np.zeros([OGB.shape[0],OGB.shape[1],OGB.shape[2]])
ycoords = np.zeros([OGB.shape[0],OGB.shape[1],OGB.shape[2]])
zcoords = np.zeros([OGB.shape[0],OGB.shape[1],OGB.shape[2]])
cx=np.shape(OGB)[2] // 2
cy=np.shape(OGB)[1] // 2
cz=np.shape(OGB)[0] // 2 #this is all the same value in general
#this is rather inefficient
for z in range(np.shape(OGB)[0]):
for y in range(np.shape(OGB)[0]):
for x in range(np.shape(OGB)[0]):
xcoords[z,y,x]=x-cx
ycoords[z,y,x]=y-cy
zcoords[z,y,x]=z-cz
if debug==1:
print("Computed OGD, OGB, xcoords,ycoords,zcoords.\n"
+ "Current Runtime: " + str(time.time() - starttime))
return OGD, OGB, xcoords, ycoords, zcoords
def constructpcagrids_ms_ocg(inpocg, kr, startscale, scaledist,
variant=1, debug=0):
"""
Construct all necessary occupancy grids for the calculation of the
multiscale curvature estimation using integral invariants and PCA.
The main difference is the splitting of the kernel into separate parts.
Input:
-'inp' is a strict or relaxed occupancy grid
-'kr' is the kernel radius
-'variant': there are two variants to calculate these values, which
are equivalent, but return two different outputs.
-the debug parameter is just there to enable easier debugging, e.g.
by printing certain statements at a time.
Returns the Occupancy Grid of the Domain 'OGD', the Occupancy Grid of the
Ball neighborhoord 'OGB', and the other two Occupancy Grids necessary for
the convolution, which are OGB*(-x), here 'OGBX', and OGB*(x*x^T), here
called 'OGBXX'.
"""
if debug==1:
starttime = time.time()
OGD = inpocg
if kr==startscale:
OGB = constructoccgrid_ball(kr)
else:
OGB = constructoccgrid_ballzone(kr=kr, alpha=scaledist)
if (np.shape(OGB)[0] %2 == 0 |
np.shape(OGB)[1] %2 == 0 |
np.shape(OGB)[2] %2 == 0):
warnings.warn("The lengths of the kernel should be uneven s.t. there\
is a proper center element.")
if variant==0:
ogbx, ogby, ogbz, ogbxx, ogbyy, ogbzz, ogbxy,\
ogbxz, ogbyz = np.zeros([9, np.shape(OGB)[0], np.shape(OGB)[1],
np.shape(OGB)[2]])
cx=np.shape(OGB)[1] // 2
cy=np.shape(OGB)[0] // 2
cz=np.shape(OGB)[2] // 2
coords = np.ones([np.shape(OGB)[0],np.shape(OGB)[1],np.shape(OGB)[2]])
xcoord = np.linspace(-cx,np.shape(OGB)[1]-1-cx,np.shape(OGB)[1])
xcoords = np.multiply(np.transpose(coords,(0,2,1)),xcoord)
ogbx = np.multiply(OGB, np.transpose(-xcoords,(0,2,1)))
ogbxx = np.multiply(OGB, np.transpose(xcoords**2,(0,2,1)))
ycoord = np.linspace(-cy,np.shape(OGB)[0]-1-cy,np.shape(OGB)[0])
ycoords = np.multiply(np.transpose(coords,(2,1,0)),ycoord)
ogby = np.multiply(OGB, np.transpose(-ycoords,(2,1,0)))
ogbyy = np.multiply(OGB, np.transpose(ycoords**2,(2,1,0)))
zcoord = np.linspace(-cz,np.shape(OGB)[2]-1-cz,np.shape(OGB)[2])
zcoords = np.multiply(np.transpose(coords,(0,1,2)),zcoord)
ogbz = np.multiply(OGB, np.transpose(-zcoords,(0,1,2)))
ogbzz = np.multiply(OGB, np.transpose(zcoords**2,(0,1,2)))
ogbxy = np.multiply(np.transpose(xcoords,(0,2,1)), -ogby)
ogbxz = np.multiply(np.transpose(xcoords,(0,2,1)), -ogbz)
ogbyz = np.multiply(np.transpose(ycoords,(2,1,0)), -ogbz)
OGBX = [ogbx, ogby, ogbz]
OGBXX = [[ogbxx, ogbxy, ogbxz],
[ogbxy, ogbyy, ogbyz],
[ogbxz, ogbyz, ogbzz]]
if debug==1:
print("Computed OGD, OGB, OGBX, OGBXX.\n"
+ "Current Runtime: " + str(time.time() - starttime))
return OGD, OGB, OGBX, OGBXX
elif variant==1:
xcoords = np.zeros([OGB.shape[0],OGB.shape[1],OGB.shape[2]])  # api: numpy.zeros
import numpy as np
from .util import pad, make_windows, weights_to_laplacian
import scipy.sparse
def closed_form_laplacian(image, epsilon):
h, w, depth = image.shape
n = h * w
indices = np.arange(n).reshape(h, w)
neighbor_indices = make_windows(pad(indices))
# shape: h w 3
means = make_windows(pad(image)).mean(axis=2)
# shape: h w 9 3
centered_neighbors = make_windows(pad(image)) - means.reshape(h, w, 1, depth)
# shape: h w 3 3
covariance = np.matmul(centered_neighbors.transpose(0, 1, 3, 2), centered_neighbors) / (3 * 3)
inv_cov = np.linalg.inv(covariance + epsilon / (3 * 3) * np.eye(3, 3))
# shape: h w 9 3
weights = np.matmul(centered_neighbors, inv_cov)
# shape: h w 9 9
weights = 1 + np.matmul(weights, centered_neighbors.transpose(0, 1, 3, 2))
i_inds = np.tile(neighbor_indices, 3 * 3)  # api: numpy.tile
__all__ = ['logpolar', 'patch_match']
import supreme as sr
import supreme.geometry
import supreme.config
_log = supreme.config.get_log(__name__)
from supreme.config import ftype,itype
from supreme.io import Image
import numpy as np
import scipy.fftpack as fftpack
from itertools import izip
from scipy import ndimage as ndi
import timeit
fft2 = fftpack.fft2
ifft2 = fftpack.ifft2
def patch_match(a, b, angles=360, Rs=None, plot_corr=False):
"""Align two patches, using the log polar transform.
Parameters
----------
a : ndarray of uint8
Reference image.
b : ndarray of uint8
Target image.
angles : int
Number of angles to use in log-polar transform.
Rs : int
Number of radial samples used in the log-polar transform.
plot_corr : bool, optional
Whether to plot the phase correlation coefficients.
Returns
-------
c : float
Peak correlation value.
theta : float
Estimated rotation angle from `a` to `b`.
scale : float
Estimated scaling from `a` to `b`.
"""
from image import phase_corr
import supreme.transform as tr
angles = np.linspace(0, np.pi * 2, angles)
if Rs is None:
Rs = max(a.shape[:2])
A, angles, log_base = tr.logpolar(a, angles=angles, Rs=Rs, extra_info=True)
B = tr.logpolar(b, angles=angles, Rs=Rs)
cv = phase_corr(B, A)
m, n = np.unravel_index(np.argmax(cv), cv.shape)
if n > Rs/2:
n = n - Rs # correlation matched, but from the other side
if plot_corr:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure()
cv_cut = cv[max(0, m - 30):min(cv.shape[1], m + 30),
max(0, n - 30):min(cv.shape[0], n + 30)]
coords = sr.geometry.Grid(*cv_cut.shape)
ax3d = axes3d.Axes3D(fig)
ax3d.plot_wireframe(coords['cols'], coords['rows'], cv_cut)
ax3d.set_title('Phase correlation around peak\n$\\log(100 + x)$')
plt.show()
return cv[m, n], angles[m], np.exp(n * log_base)
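# Illustrative usage sketch (hypothetical example, not part of the original module):
# aligning two synthetic uint8 patches; theta and scale describe the estimated
# rotation and scaling from `a` to `b`, as returned by patch_match() above.
def _example_patch_match():
    a = (np.random.rand(64, 64) * 255).astype(np.uint8)
    b = np.rot90(a).copy()
    c, theta, scale = patch_match(a, b, angles=180, Rs=64)
    return c, theta, scale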
def _clearborder(image,border_shape):
rows,cols = image.shape
br,bc = border_shape
image[:br,:] = 0
image[rows-br:,:] = 0
image[:,:bc] = 0
image[:,cols-bc:] = 0
return image
def _peaks(image,nr,minvar=0):
"""Divide image into nr quadrants and return peak value positions."""
n = np.ceil(np.sqrt(nr))
quadrants = _rects(image.shape,n,n)
peaks = []
for q in quadrants:
q_image = image[q.as_slice()]
q_argmax = q_image.argmax()
q_maxpos = np.unravel_index(q_argmax,q.shape)
if q_image.flat[q_argmax] > minvar:
peaks.append(np.array(q_maxpos) + q.origin)
return peaks
def rectangle_inside(shape,percent=10):
"""Return a path inside the border as defined by shape."""
shape = np.asarray(shape)
rtop = np.round_(shape*percent/100.)
rbottom = shape - rtop
cp = sr.geometry.coord_path
return cp.build(cp.rectangle(rtop,rbottom))
def _rects(shape,divide_rows,divide_cols):
class Rect:
def __init__(self,top_r,top_c,height,width):
self.top_r = top_r
self.top_c = top_c
self.width = width
self.height = height
@property
def origin(self):
return (self.top_r,self.top_c)
@property
def shape(self):
return (int(self.height),int(self.width))
@property
def coords(self):
"""x- and y-coordinates, rather than row/column"""
return (self.top_c,self.top_c,
self.top_c+self.width,self.top_c+self.width),\
(self.top_r,self.top_r+self.height,
self.top_r+self.height,self.top_r)
def as_slice(self):
return [slice(self.top_r,self.top_r+self.height),
slice(self.top_c,self.top_c+self.width)]
def __str__(self):
return "Rectangle: (%d,%d), height: %d, width: %d" % \
(self.top_r,self.top_c,self.height,self.width)
rows,cols = shape
rows = np.linspace(0,rows,divide_rows+1).astype(int)
cols = np.linspace(0,cols,divide_cols+1).astype(int)
rects = []
for r0,r1 in zip(rows[:-1],rows[1:]):
for c0,c1 in zip(cols[:-1],cols[1:]):
rects.append(Rect(r0,c0,r1-r0,c1-c0))
return rects
def _lpt_on_path(image,path,shape, **lp_args):
"""Calculate log polar transforms along a given path."""
path = list(path)
cutouts = sr.geometry.cut.along_path(path,image,shape=shape)
for pos,cut in izip(path,cutouts):
lpt = sr.transform.logpolar(cut, **lp_args)
yield (pos,cut,lpt - lpt.mean())
def _lpt_corr(reference_frames,
frame, descr, path, window_shape, fft_shape,
angles, log_base,
**lpt_args):
try:
max_corr_sofar = descr['source'].info['variance']
except:
max_corr_sofar = 0
corr_vals = []
for pos,cut,lpt in _lpt_on_path(frame,path,window_shape,
**lpt_args):
# prepare correlation FFT
X = fft2(lpt)
for rf in reference_frames:
# Phase correlation
corr = rf['fft'] * X.conj()
corr /= np.abs(corr)
corr = np.abs(ifft2(corr))
corr_max_arg = corr.argmax()
corr_max = corr.flat[corr_max_arg]
corr_vals.append(corr_max)
if corr_max_arg != 0 and corr_max > max_corr_sofar:
rotation, scale = np.unravel_index(corr_max_arg, fft_shape)  # api: numpy.unravel_index
import matplotlib.pyplot as plt
import yaml
import os
import zipfile
import numpy as np
# ['all_grad_norm', 'all_updated_grad_norm', 'all_probability', 'all_entropy', 'all_entropy_scale', 'all_probability_distribution']
plt.figure()
npz_path = '/workspace/mnt/storage/guangcongzheng/zju_zgc/guided-diffusion/log/imagenet1000_classifier256x256_channel128_upperbound/predict/model500000_imagenet1000_stepsddim25_sample192_plot/metainfo_scale10.0_stepsddim25_class0_samples_192x256x256x3.npz'
npz = np.load(npz_path)
arr = npz['all_probability']
plt.plot([t for t in range(960, -1, -40)], np.mean(arr, axis=1))
plt.xticks([960 - t for t in range(960, -1, -160)], [])
npz_path = '/workspace/mnt/storage/guangcongzheng/zju_zgc/guided-diffusion/log/imagenet1000_classifier256x256_channel128_upperbound/predict/model500000_imagenet1000_stepsddim25_sample192_plot_entropyScale/metainfo_scale5.0_stepsddim25_class0_samples_192x256x256x3.npz'
# npz_path = '/workspace/mnt/storage/guangcongzheng/zju_zgc/guided-diffusion/log/imagenet1000_classifier256x256_channel128_upperbound/predict/model500000_imagenet1000_stepsddim25_sample192_plot_entropyScale/metainfo_scale10.0_stepsddim25_class0_samples_192x256x256x3.npz'
# npz_path = '/workspace/mnt/storage/guangcongzheng/zju_zgc/guided-diffusion/log/imagenet1000_classifier256x256_channel128_upperbound/predict/model500000_imagenet1000_stepsddim25_sample5000_plot_entropyScale/metainfo_scale10.0_stepsddim25_class0-999_samples_5000x256x256x3.npz'
npz = np.load(npz_path)
arr = npz['all_probability']
plt.plot([t for t in range(960, -1, -40)], np.mean(arr, axis=1))  # api: numpy.mean
# threads.py
#
import time
import pickle
from datetime import datetime
import numpy as np
from PyQt5.QtCore import QThread
from .. import __version__
from ..fly import Fly
from ..ts import FixedCourtshipTrackingSummary
from .tracking import *
class TrackingThread(QThread):
"""Worker thread to run tracking algorithm.
Parameters
----------
video_settings : list of TrackingSettings
Each TrackingSettings object should be completely filled before
creating an instance of this object, and running this thread.
logger : QTextEdit
To store information about video being currently tracked.
This may need to be changed to prevent "QObject::connect: Cannot
queue arguments of type 'QTextBlock' from being displayed.
"""
def __init__(self, video_settings, logger, progress, parent=None):
super(TrackingThread, self).__init__(parent)
self.video_settings = video_settings
self.logger = logger
self.tracking_progress = progress
def run(self):
for ix in xrange(len(self.video_settings)):
start_time = time.time()
settings = self.video_settings[ix]
video = settings.video
n_frames = video.get_n_frames()
timestamps = video.get_all_timestamps()
fps = (1. / np.mean(np.diff(timestamps)))  # api: numpy.diff
from __future__ import absolute_import, division, print_function
import numpy as np
import time
import copy
from utils.npangles import quaternion_between, quaternion_to_expmap, expmap_to_rotmat, rotmat_to_euler, rotmat_to_quaternion, rotate_vector_by_quaternion
MASK_MODES = ('No mask', 'Future Prediction', 'Missing Frames', 'Occlusion Simulation', 'Structured Occlusion', 'Noisy Transmission')
def gen_mask(mask_type, keep_prob, batch_size, njoints, seq_len, body_members, baseline_mode=False):
# Default mask, no mask
mask = np.ones(shape=(batch_size, njoints, seq_len, 1))
if mask_type == 1: # Future Prediction
mask[:, :, np.int(seq_len * keep_prob):, :] = 0.0
elif mask_type == 2: # Missing Frames
occ_frames = np.random.randint(seq_len - 1, size=np.int(seq_len * (1.0 - keep_prob)))
mask[:, :, occ_frames, :] = 0.0
elif mask_type == 3: # Occlusion Simulation
rand_joints = np.random.randint(njoints, size=np.int(njoints * (1.0 - keep_prob)))
mask[:, rand_joints, :, :] = 0.0
elif mask_type == 4: # Structured Occlusion Simulation
rand_joints = set()
while ((njoints - len(rand_joints)) >
(njoints * keep_prob)):
joints_to_add = (body_members.values()[np.random.randint(len(body_members))])['joints']
for joint in joints_to_add:
rand_joints.add(joint)
mask[:, list(rand_joints), :, :] = 0.0
elif mask_type == 5: # Noisy transmission
mask = np.random.binomial(1, keep_prob, size=mask.shape)
if baseline_mode:
# This unmasks first and last frame for all sequences (required for baselines)
mask[:, :, [0, -1], :] = 1.0
return mask
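# Illustrative usage sketch (hypothetical example, not part of the original file):
# a future-prediction mask (mask_type=1); body_members is not used for this mask
# type, so an empty dict is enough here.
def _example_gen_mask():
    mask = gen_mask(mask_type=1, keep_prob=0.8, batch_size=4, njoints=20,
                    seq_len=50, body_members={})
    assert mask.shape == (4, 20, 50, 1)
    return mask[:, :, 40:, :].sum()  # the last 20% of the frames are zeroed out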
def gen_latent_noise(batch_size, latent_cond_dim):
return np.random.uniform(size=(batch_size, latent_cond_dim))
def linear_baseline(real_seq, mask):
linear_seq = real_seq * mask
for j in range(real_seq.shape[0]):
for f in range(1, real_seq.shape[1] - 1):
if mask[j, f, 0] == 0:
prev_f = f - 1
for g in range(f - 1, -1, -1):
if mask[j, g, 0] == 1:
prev_f = g
break
next_f = f + 1
for g in range(f + 1, real_seq.shape[1]):
if mask[j, g, 0] == 1:
next_f = g
break
blend_factor = (f - prev_f) / (next_f - prev_f)
linear_seq[j, f, :] = ((linear_seq[j, prev_f, :] * (1 - blend_factor)) +
(linear_seq[j, next_f, :] * blend_factor))
return linear_seq
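# Illustrative usage sketch (hypothetical example, not part of the original file):
# linear_baseline() fills masked frames by interpolating between the nearest
# observed frames of each joint.
def _example_linear_baseline():
    real_seq = np.arange(10, dtype=float).reshape(1, 10, 1)  # one joint, 10 frames, 1 dim
    mask = np.ones((1, 10, 1))
    mask[:, 3:7, :] = 0.0  # frames 3-6 are missing
    filled = linear_baseline(real_seq, mask)
    return filled[0, :, 0]  # missing frames interpolated between frames 2 and 7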
def burke_baseline(rawdata, mask, tol=0.0025, sigR=1e-3, keepOriginal=True):
"""Low-Rank smoothed Kalman filter, based in Burke et. al"""
rawdata = np.transpose(rawdata.copy(), (1, 0, 2))
raw_shape = [int(dim) for dim in rawdata.shape]
rawdata = np.reshape(rawdata, (raw_shape[0], raw_shape[1] * raw_shape[2]))
mask = np.tile(mask.copy(), (1, 1, raw_shape[2]))
mask = np.transpose(mask, (1, 0, 2))
mask = np.reshape(mask, (raw_shape[0], raw_shape[1] * raw_shape[2]))
rawdata[mask == 0] = np.nan
X = rawdata[~np.isnan(rawdata).any(axis=1)]
if X.size == 0 or np.product(X.shape[-2:]) == 0:
return np.zeros((raw_shape[1], raw_shape[0], raw_shape[2]))
m = np.mean(X, axis=0)
U, S, V = np.linalg.svd(X - m)
d = np.nonzero(np.cumsum(S)  # api: numpy.cumsum
import inspect
import sys
from typing import List, Optional
import numpy as np
from .config import (
SIMULATION_CONFIG,
get_observation_names,
get_reward_names_and_weights,
)
from .models import Direction, Node, Table
from .simulation import Factory
__all__ = ["get_observations", "get_reward", "get_done", "can_move_in_direction"]
def get_done(agent_id: int, factory: Factory) -> bool:
"""We're done with the table if it doesn't have a core anymore or we're out of moves."""
counter = factory.agent_step_counter.get(agent_id)
if counter > factory.max_num_steps:
# Note that we track the maximum number of steps per agent, not in total.
return True
agent: Table = factory.tables[agent_id]
return not agent.has_core()
def get_reward(agent_id: int, factory: Factory, episodes: int) -> float:
"""Get the reward for a single agent in its current state.
Similar to observations, reward terms get configured in config.yml.
"""
# only configured rewards get picked up
rewards_to_use = get_reward_names_and_weights()
rewards = {}
if SIMULATION_CONFIG.get("tighten_max_steps"):
discount_by = SIMULATION_CONFIG.get("discount_episodes_by")
discount_until = SIMULATION_CONFIG.get("discount_episodes_until")
episode_discount = max(discount_until, 1 - (episodes / float(discount_by)))
factory.max_num_steps = int(factory.initial_max_num_steps * episode_discount)
# We set this to 0, as this is just a mean to decrease the max step count over
# time. This leads to shorter episodes and is reflected in "rew_punish_slow_tables".
rewards["rew_tighten_max_steps"] = 0
max_num_steps = factory.max_num_steps
steps = factory.agent_step_counter.get(agent_id)
agent: Table = factory.tables[agent_id]
# sum negative rewards due to collisions and illegal moves
if factory.moves.get(agent_id):
move = factory.moves[agent_id].pop(-1)
rewards["rew_collisions"] = move.reward()
# high incentive for reaching a target, quickly
time_taken = steps / float(max_num_steps)
if agent.is_at_target:
rewards["rew_found_target"] = 1.0 - time_taken
rewards["rew_found_target_squared"] = (1.0 - time_taken) ** 2
# Draft reward term that accounts for physical moves
physical_moves = factory.move_counter["MOVED"]
physical_moves_ratio = physical_moves / float(max_num_steps)
if agent.is_at_target:
rewards["rew_found_target_physical"] = 1.0 - physical_moves_ratio
rewards["rew_found_target_physical_squared"] = (1.0 - physical_moves_ratio) ** 2
# punish if too slow
if steps == max_num_steps:
num_cores_left = len([t for t in factory.tables if t.has_core()])
rewards["rew_punish_slow_tables"] = -1 * num_cores_left
if not agent.has_core():
# If an agent without core is close to one with core, let it shy away...
rewards["rew_avoid_cores"] = -1.0 * has_core_neighbour(agent.node, factory)
# ... and if it sits on any current target, punish it
# TODO: how can we account for the fact that tables might be "on the path"
# to a target, given that we don't know what the path is?
# Maybe we can account for situation in which ALL paths to a core target are obstructed
# by this table?
all_targets = [c.current_target for c in factory.cores]
rewards["rew_blocking_target"] = -1.0 * int(agent.node in all_targets)
reward = 0
for reward_name, weight in rewards_to_use.items():
# multiply rewards by configured weight terms
reward += rewards.get(reward_name, 0) * weight
return reward
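# Hedged illustration of the final weighting step in get_reward(); the reward
# names and weights below are made up and do NOT come from the project's
# actual config.yml.
def _weighted_reward_example():
    rewards = {"rew_found_target": 0.6, "rew_collisions": -0.3}
    weights = {"rew_found_target": 1.0, "rew_collisions": 0.5, "rew_blocking_target": 2.0}
    return sum(rewards.get(name, 0) * weight for name, weight in weights.items())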
def one_hot_encode(total: int, positions: List[int]):
"""Compute one-hot encoding of a list of positions (ones) in
a vector of length 'total'."""
lst = [0 for _ in range(total)]
for position in positions:
        assert position < total, "index out of bounds"
lst[position] = 1
return lst
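# Illustrative check (hypothetical values, not tied to the factory config):
# one_hot_encode(5, [1, 3]) -> [0, 1, 0, 1, 0]
def _one_hot_encode_example():
    return one_hot_encode(5, [1, 3])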
def can_move_in_direction(node: Node, direction: Direction, factory: Factory):
    """Return True if the node has a neighbour in the specified direction
    and that neighbour can be moved to; a non-existing neighbour counts
    as occupied.
    """
has_direction = node.has_neighbour(direction)
is_free = False
if has_direction:
neighbour: Node = node.get_neighbour(direction)
if neighbour.is_rail:
neighbour_rail = factory.get_rail(neighbour)
is_free = neighbour_rail.is_free() or node in neighbour_rail.nodes
else:
is_free = not neighbour.has_table()
return is_free
def has_core_neighbour(node: Node, factory: Factory):
"""If a node has at least one direct neighbour with a core, return True,
else False. We use this to inform tables without cores to move out of the way
of tables with cores."""
for direction in Direction:
has_direction = node.has_neighbour(direction)
is_free = can_move_in_direction(node, direction, factory)
if has_direction and not is_free:
neighbour: Node = node.get_neighbour(direction)
if neighbour.has_table() and neighbour.table.has_core():
return True
return False
# COORDINATE-BASED OBSERVATIONS
# TODO: investigate if there's a way to assess the "value" of locations for
# non-core bearing tables to move to? This way we could assign them more concrete goals in the reward function.
def obs_agent_id(agent_id: int, factory: Factory) -> np.ndarray:
"""This agent's ID"""
return np.asarray([agent_id])
def obs_agent_coordinates(agent_id: int, factory: Factory) -> np.ndarray:
"""This agent's coordinates"""
agent: Table = factory.tables[agent_id]
return np.asarray(list(agent.node.coordinates))
def obs_all_table_coordinates(agent_id: int, factory: Factory) -> np.ndarray:
"""encode all table coordinates"""
coordinates = []
for table in factory.tables:
coordinates += list(table.node.coordinates)
return np.asarray(coordinates)
def obs_agent_has_neighbour(agent_id: int, factory: Factory) -> np.ndarray:
"""Does this agent have a neighbouring node?"""
agent: Table = factory.tables[agent_id]
return np.asarray(
[
agent.node.has_neighbour(Direction.up),
agent.node.has_neighbour(Direction.right),
agent.node.has_neighbour(Direction.down),
agent.node.has_neighbour(Direction.left),
]
)
def obs_agent_free_neighbour(agent_id: int, factory: Factory):
"""Does this agent have a neighbouring node and, if so, can we go there?"""
agent: Table = factory.tables[agent_id]
return np.asarray(
[
can_move_in_direction(agent.node, Direction.up, factory),
can_move_in_direction(agent.node, Direction.right, factory),
can_move_in_direction(agent.node, Direction.down, factory),
can_move_in_direction(agent.node, Direction.left, factory),
]
)
def obs_agent_has_core(agent_id: int, factory: Factory):
"""Does this agent have a core?"""
agent: Table = factory.tables[agent_id]
return np.asarray([agent.has_core()])
def obs_agent_core_target_coordinates(agent_id: int, factory: Factory):
    """Return this agent's core target coordinates if it has a core, otherwise [-1, -1]."""
agent: Table = factory.tables[agent_id]
if agent.has_core():
current_target: Node = agent.core.current_target
return np.asarray(list(current_target.coordinates))
else:
# TODO: is there something more natural/clever than this?
# compare this to the one-hot encoded situation, where we simply have a zeros-only array.
        return np.asarray([-1, -1])
"""
Copyright 2019 <NAME>, <NAME>
This file is part of A2DR.
A2DR is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
A2DR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with A2DR. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy as np
import scipy as sp
import numpy.linalg as LA
import copy
import time
import scipy.sparse.linalg
import matplotlib.pyplot as plt
from cvxpy import *
from scipy import sparse
from scipy.optimize import nnls
from sklearn.datasets import make_sparse_spd_matrix
from a2dr import a2dr
from a2dr.proximal import *
from a2dr.tests.base_test import BaseTest
class TestPaper(BaseTest):
"""Reproducible tests and plots for A2DR paper experiments."""
def setUp(self):
np.random.seed(1)
self.eps_rel = 1e-8 # specify these in all examples?
self.eps_abs = 1e-6
self.MAX_ITER = 1000
def test_nnls(self, figname):
# minimize ||Fz - g||_2^2 subject to z >= 0.
# Problem data.
p, q = 10000, 8000
density = 0.001
F = sparse.random(p, q, density=density, data_rvs=np.random.randn)
g = np.random.randn(p)
# Convert problem to standard form.
# f_1(x_1) = ||Fx_1 - g||_2^2, f_2(x_2) = I(x_2 >= 0).
# A_1 = I_n, A_2 = -I_n, b = 0.
prox_list = [lambda v, t: prox_sum_squares_affine(v, t, F, g), prox_nonneg_constr]
A_list = [sparse.eye(q), -sparse.eye(q)]
b = np.zeros(q)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finish DRS.')
# Solve with A2DR.
t0 = time.time()
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
t1 = time.time()
a2dr_beta = a2dr_result["x_vals"][-1]
print('nonzero entries proportion = {}'.format(np.sum(a2dr_beta > 0)*1.0/len(a2dr_beta)))
print('Finish A2DR.')
self.compare_total(drs_result, a2dr_result, figname)
# Check solution correctness.
print('run time of A2DR = {}'.format(t1-t0))
print('constraint violation of A2DR = {}'.format(np.min(a2dr_beta)))
print('objective value of A2DR = {}'.format(np.linalg.norm(F.dot(a2dr_beta)-g)))
def test_nnls_reg(self, figname):
# minimize ||Fz - g||_2^2 subject to z >= 0.
# Problem data.
p, q = 300, 500
density = 0.001
F = sparse.random(p, q, density=density, data_rvs=np.random.randn)
g = np.random.randn(p)
# Convert problem to standard form.
# f_1(x_1) = ||Fx_1 - g||_2^2, f_2(x_2) = I(x_2 >= 0).
# A_1 = I_n, A_2 = -I_n, b = 0.
prox_list = [lambda v, t: prox_sum_squares_affine(v, t, F, g), prox_nonneg_constr]
A_list = [sparse.eye(q), -sparse.eye(q)]
b = np.zeros(q)
# Solve with no regularization.
a2dr_noreg_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, lam_accel=0, max_iter=self.MAX_ITER)
print('Finish A2DR no regularization.')
# Solve with constant regularization.
a2dr_consreg_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, ada_reg=False, max_iter=self.MAX_ITER)
print('Finish A2DR constant regularization.')
# Solve with adaptive regularization.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, ada_reg=True, max_iter=self.MAX_ITER)
print('Finish A2DR adaptive regularization.')
self.compare_total_all([a2dr_noreg_result, a2dr_consreg_result, a2dr_result],
['no-reg', 'constant-reg', 'ada-reg'], figname)
def test_sparse_inv_covariance(self, q, alpha_ratio, figname):
# minimize -log(det(S)) + trace(S*Q) + \alpha*||S||_1 subject to S is symmetric PSD.
# Problem data.
# q: Dimension of matrix.
p = 1000 # Number of samples.
ratio = 0.9 # Fraction of zeros in S.
S_true = sparse.csc_matrix(make_sparse_spd_matrix(q, ratio))
Sigma = sparse.linalg.inv(S_true).todense()
z_sample = np.real(sp.linalg.sqrtm(Sigma)).dot(np.random.randn(q,p)) # make sure it's real matrices.
Q = np.cov(z_sample)
        print('Q is positive definite? {}'.format(LA.slogdet(Q)[0] > 0))
mask = np.ones(Q.shape, dtype=bool)
np.fill_diagonal(mask, 0)
alpha_max = np.max(np.abs(Q)[mask])
alpha = alpha_ratio*alpha_max # 0.001 for q = 100, 0.01 for q = 50.
# Convert problem to standard form.
# f_1(S_1) = -log(det(S_1)) + trace(S_1*Q) on symmetric PSD matrices, f_2(S_2) = \alpha*||S_2||_1.
# A_1 = I, A_2 = -I, b = 0.
prox_list = [lambda v, t: prox_neg_log_det(v.reshape((q,q), order='C'), t, lin_term=t*Q).ravel(order='C'),
lambda v, t: prox_norm1(v, t*alpha)]
A_list = [sparse.eye(q*q), -sparse.eye(q*q)]
b = np.zeros(q*q)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
# lam_accel = 0 seems to work well sometimes, although it oscillates a lot.
a2dr_S = a2dr_result["x_vals"][-1].reshape((q,q), order='C')
self.compare_total(drs_result, a2dr_result, figname)
print('Finished A2DR.')
print('recovered sparsity = {}'.format(np.sum(a2dr_S != 0)*1.0/a2dr_S.shape[0]**2))
def test_l1_trend_filtering(self, figname):
# minimize (1/2)||y - z||_2^2 + \alpha*||Dz||_1,
# where (Dz)_{t-1} = z_{t-1} - 2*z_t + z_{t+1} for t = 2,...,q-1.
# Reference: https://web.stanford.edu/~boyd/papers/l1_trend_filter.html
# Problem data.
q = int(2*10**4)
y = np.random.randn(q)
alpha = 0.01*np.linalg.norm(y, np.inf)
# Form second difference matrix.
D = sparse.lil_matrix(sparse.eye(q))
D.setdiag(-2, k = 1)
D.setdiag(1, k = 2)
D = D[:(q-2),:]
# Convert problem to standard form.
# f_1(x_1) = (1/2)||y - x_1||_2^2, f_2(x_2) = \alpha*||x_2||_1.
# A_1 = D, A_2 = -I_{n-2}, b = 0.
prox_list = [lambda v, t: prox_sum_squares(v, t = 0.5*t, offset = y),
lambda v, t: prox_norm1(v, t = alpha*t)]
A_list = [D, -sparse.eye(q-2)]
b = np.zeros(q-2)
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
self.compare_total(drs_result, a2dr_result, figname)
print('Finished A2DR.')
def test_optimal_control(self, figname):
        # Problem data.
p = 80
q = 150
L = 20
F = np.random.randn(q,q)
G = np.random.randn(q,p)
h = np.random.randn(q)
z_init = np.random.randn(q)
F = F / np.max(np.abs(LA.eigvals(F)))
z_hat = z_init
for l in range(L-1):
u_hat = np.random.randn(p)
u_hat = u_hat / np.max(np.abs(u_hat))
z_hat = F.dot(z_hat) + G.dot(u_hat) + h
z_term = z_hat
# no normalization of u_hat actually leads to more significant improvement of A2DR over DRS, and also happens to be feasible
# x_term = 0 also happens to be feasible
# Convert problem to standard form.
def prox_sat(v, t, v_lo = -np.inf, v_hi = np.inf):
return prox_box_constr(prox_sum_squares(v, t), t, v_lo, v_hi)
prox_list = [prox_sum_squares, lambda v, t: prox_sat(v, t, -1, 1)]
A1 = sparse.lil_matrix(((L+1)*q,L*q))
A1[q:L*q,:(L-1)*q] = -sparse.block_diag((L-1)*[F])
A1.setdiag(1)
A1[L*q:,(L-1)*q:] = sparse.eye(q)
A2 = sparse.lil_matrix(((L+1)*q,L*p))
A2[q:L*q,:(L-1)*p] = -sparse.block_diag((L-1)*[G])
A_list = [sparse.csr_matrix(A1), sparse.csr_matrix(A2)]
b_list = [z_init]
b_list.extend((L-1)*[h])
b_list.extend([z_term])
b = np.concatenate(b_list)
# Solve with CVXPY
z = Variable((L,q))
u = Variable((L,p))
obj = sum([sum_squares(z[l]) + sum_squares(u[l]) for l in range(L)])
constr = [z[0] == z_init, norm_inf(u) <= 1]
constr += [z[l+1] == F*z[l] + G*u[l] + h for l in range(L-1)]
constr += [z[L-1] == z_term]
prob = Problem(Minimize(obj), constr)
prob.solve(solver='SCS', verbose=True)
# OSQP fails for p=50, q=100, L=30, and also for p=100, q=200, L=30
# SCS also fails to converge
cvxpy_obj = prob.value
cvxpy_z = z.value.ravel(order='C')
cvxpy_u = u.value.ravel(order='C')
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('Finished DRS.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
self.compare_total(drs_result, a2dr_result, figname)
print('Finished A2DR.')
# check solution correctness
a2dr_z = a2dr_result['x_vals'][0]
a2dr_u = a2dr_result['x_vals'][1]
a2dr_obj = np.sum(a2dr_z**2) + np.sum(a2dr_u**2)
cvxpy_obj_raw = np.sum(cvxpy_z**2) + np.sum(cvxpy_u**2)
cvxpy_Z = cvxpy_z.reshape([L,q], order='C')
cvxpy_U = cvxpy_u.reshape([L,p], order='C')
a2dr_Z = a2dr_z.reshape([L,q], order='C')
a2dr_U = a2dr_u.reshape([L,p], order='C')
cvxpy_constr_vio = [np.linalg.norm(cvxpy_Z[0]-z_init), np.linalg.norm(cvxpy_Z[L-1]-z_term)]
a2dr_constr_vio = [np.linalg.norm(a2dr_Z[0]-z_init), np.linalg.norm(a2dr_Z[L-1]-z_term)]
for l in range(L-1):
cvxpy_constr_vio.append(np.linalg.norm(cvxpy_Z[l+1]-F.dot(cvxpy_Z[l])-G.dot(cvxpy_U[l])-h))
a2dr_constr_vio.append(np.linalg.norm(a2dr_Z[l+1]-F.dot(a2dr_Z[l])-G.dot(a2dr_U[l])-h))
print('linear constr vio cvxpy = {}, linear constr_vio a2dr = {}'.format(
np.mean(cvxpy_constr_vio), np.mean(a2dr_constr_vio)))
print('norm constr vio cvxpy = {}, norm constr vio a2dr = {}'.format(np.max(np.abs(cvxpy_u)),
np.max(np.abs(a2dr_u))))
print('objective cvxpy = {}, objective cvxpy raw = {}, objective a2dr = {}'.format(cvxpy_obj,
cvxpy_obj_raw,
a2dr_obj))
def test_coupled_qp(self, figname):
# Problem data.
L = 8 # number of blocks
s = 50 # number of coupling constraints
ql = 300 # variable dimension of each subproblem QP
pl = 200 # constraint dimension of each subproblem QP
G_list = [np.random.randn(s,ql) for l in range(L)]
F_list = [np.random.randn(pl,ql) for l in range(L)]
c_list = [np.random.randn(ql) for l in range(L)]
z_tld_list = [np.random.randn(ql) for l in range(L)]
d_list = [F_list[l].dot(z_tld_list[l])+0.1 for l in range(L)]
G = np.hstack(G_list)
z_tld = np.hstack(z_tld_list)
h = G.dot(z_tld)
H_list = [np.random.randn(ql,ql) for l in range(L)]
Q_list = [H_list[l].T.dot(H_list[l]) for l in range(L)]
# Convert problem to standard form.
def tmp(l, Q_list, c_list, F_list, d_list):
return lambda v, t: prox_qp(v, t, Q_list[l], c_list[l], F_list[l], d_list[l])
# Use "map" method to avoid implicit overriding, which would make all the proximal operators the same
prox_list = list(map(lambda l: tmp(l, Q_list, c_list, F_list, d_list), range(L)))
A_list = G_list
b = h
# Solve with DRS.
drs_result = a2dr(prox_list, A_list, b, anderson=False, precond=True, max_iter=self.MAX_ITER)
print('DRS finished.')
# Solve with A2DR.
a2dr_result = a2dr(prox_list, A_list, b, anderson=True, precond=True, max_iter=self.MAX_ITER)
print('A2DR finished.')
self.compare_total(drs_result, a2dr_result, figname)
# Check solution correctness.
a2dr_z = a2dr_result['x_vals']
a2dr_obj = np.sum([a2dr_z[l].dot(Q_list[l]).dot(a2dr_z[l])
+ c_list[l].dot(a2dr_z[l]) for l in range(L)])
a2dr_constr_vio = [np.linalg.norm(np.maximum(F_list[l].dot(a2dr_z[l])-d_list[l],0))**2
for l in range(L)]
        a2dr_constr_vio += [np.linalg.norm(G.dot(np.hstack(a2dr_z)) - h)**2]
import numpy as np
from scipy import optimize
def fit(X,
y,
population_count=100,
elite_count=2,
velocity_rate=0.001,
epoch_count=25):
params_count = 4
lower_limits = np.array([0, 0, -np.pi, -1])
upper_limits = np.array([1, np.pi * 2, np.pi, 1])
bounds = np.array([(l, u) for l, u in zip(lower_limits, upper_limits)])
def function(afsb, t):
return afsb[..., 0:1] * np.sin(afsb[..., 1:2] * t - afsb[..., 2:3]) + afsb[..., 3:4]
def error(params, X, y):
y_ = function(params, X)
return np.sqrt(np.sum((y - y_) ** 2, axis=-1) / X.shape[-1])
def extinctions(fitness):
        return (fitness + np.min(fitness) * (
            ((params_count - 1.0) / (population_count - 1.0)) - 1.0)) / np.max(
            fitness)
# initial population
swarm_positions = np.random.uniform(lower_limits, upper_limits, (population_count, params_count))
swarm_velocities = np.random.uniform(-0.1, 0.1, population_count * params_count).reshape(
(population_count, params_count))
swarm_fitness = error(swarm_positions, X[np.newaxis, :], y)
swarm_extinction = extinctions(swarm_fitness)
swarm_sorted_args = np.argsort(swarm_fitness, axis=0)
# global best
solution = swarm_positions[swarm_sorted_args[0], ...]
best_fitness = swarm_fitness[swarm_sorted_args[0]]
# iterate
for epoch in range(epoch_count):
# early exit if close enough
if best_fitness < 1e-6:
break
# pick elites and do a gradient descent using l-bfgs-b algorithm
for e in range(elite_count):
x, _, _ = optimize.fmin_l_bfgs_b(
func=error,
x0=swarm_positions[swarm_sorted_args[e], ...],
args=(X[np.newaxis, :], y),
approx_grad=True,
bounds=bounds,
maxiter=100)
swarm_velocities[swarm_sorted_args[e], ...] = np.random.uniform() * \
swarm_velocities[swarm_sorted_args[e], ...] + x - \
swarm_positions[swarm_sorted_args[e], ...]
swarm_positions[swarm_sorted_args[e], ...] = x
# create the offsprings
offspring_positions = np.zeros((population_count, params_count), dtype=np.float32)
offspring_velocities = np.zeros((population_count, params_count), dtype=np.float32)
offspring_fitness = np.zeros(population_count, dtype=np.float32)
# populate offsprings
for off in range(population_count):
parents_count = len(swarm_sorted_args)
# rank based selection
probabilities = np.array([parents_count - i for i in range(parents_count)], dtype=np.float32)
probabilities /= np.sum(probabilities)
a, b, prot = np.random.choice(swarm_sorted_args, 3, p=probabilities, replace=False)
# combine parents
mix_values = np.random.uniform(size=params_count)
offspring_positions[off, :] = swarm_positions[a, :] * mix_values + \
swarm_positions[b, :] * (1.0 - mix_values)
# add a bit of the velocity from the parents
offspring_positions[off, :] += velocity_rate * (swarm_velocities[a, :] + swarm_velocities[b, :])
# use the velocities from the parents
offspring_velocities[off, :] = np.random.uniform(size=params_count) * swarm_velocities[a, :] + \
np.random.uniform(size=params_count) * swarm_velocities[b, :]
# mutate
p = (np.mean(swarm_extinction[[a, b]]) * (params_count - 1.0) + 1.0) / params_count
if p < np.random.uniform():
swarm_min = np.min(swarm_positions, axis=0)
swarm_max = np.max(swarm_positions, axis=0)
x = np.random.uniform(-1, 1, size=params_count) * np.mean(swarm_extinction[[a, b]]) * (
swarm_max - swarm_min)
offspring_velocities[off, :] += x
offspring_positions[off, :] += x
# adoption
            mix_values = np.random.uniform(size=params_count)
# This code is optimized for just the 3D plot of the evolution of
# The defuzzified output based on the input parameters
import numpy as np
import matplotlib.pyplot as plt
# A function that creates a triangle membership function
def triangleMembershipFunction(x, a, b, c):
    # This function takes in an array of x points and
    # the three shape parameters a, b and c,
    # where a => left foot, b => center (peak), c => right foot
y = np.zeros(len(x)) # shape of the output array
# Setting the areas outside of our interest to zero
y[x <= a] = 0
y[x >= c] = 0
# Setting the center to 1
y[x == b] = 1
# Determine the index of the left side and right side
left_side = np.logical_and(a < x, x <= b)
right_side = np.logical_and(b < x, x < c)
# Replacing those values with the appropriate values using interpolation
y[left_side] = (x[left_side] - a) / (b - a)
y[right_side] = (x[right_side] - c) / (b - c)
return y
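# Hedged example (values are illustrative): a triangle centred at 5 on [0, 10],
# evaluated on an 11-point grid, rises linearly to 1.0 at x == 5 and falls back
# to 0 at the feet a == 0 and c == 10.
def _triangle_membership_example():
    xs = np.linspace(0, 10, 11)
    return triangleMembershipFunction(xs, 0, 5, 10)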
# A function that interpolates the fuzzy data to give the degree of membership
def Interpolate(x, y, value):
# This function takes in:
# x => a 1d array of x-axis coordinates
# y => a 1d array of y-axis coordinates
# value => the exact value we desire the interpolation for
y = np.interp(value, x, y)
return y
# A function that computes the center of Gravity of 2 Arrays
def Centroid(x, aggregate):
# This function takes in the x-axis of the curve and the aggregated area of the
# fuzzy set and returns the centroid position
centroid = np.sum(x * aggregate) / np.sum(aggregate)
return centroid
# A function that computes the mean of maximum of 2 arrays
def MeanOfMax(x, a):
# This function takes in a 1-D array a => aggregate of the fuzzy logic
# another 1-D array x=> the x-axis
# Returns the mean of the maximum value(s) of the aggregate array
idx = np.argwhere(a == np.max(a)) # Finding the index of all max values on the y-axis
res = np.mean(x[idx]) # computing the mean of the max values on the x-axis
return res
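# Hedged example tying the helpers together (illustrative numbers): for a
# symmetric triangular aggregate, both defuzzifiers (centre of gravity and
# mean of maximum) return the centre of the triangle, here 5.0.
def _defuzzify_example():
    xs = np.linspace(0, 10, 11)
    agg = triangleMembershipFunction(xs, 0, 5, 10)
    return Centroid(xs, agg), MeanOfMax(xs, agg)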
# Generating the universe variables
# *Ammo and Health input ranges [0, 100]
# *Action output ranges [0, 100]
# Generating the points from 1 to 100
n = 1000 # number of points
x_ammo = np.linspace(0, 100, n)
x_health = np.linspace(0, 100, n)
x_action = np.linspace(0, 100, n)
# Fuzzy Logic Game Engine
# This function takes in the health and ammo value and returns the
# fuzzy action output after applying fuzzy logic
# it returns 4 outputs: *the crisp value from Sum aggregate and centroid defuzz
# *the crisp value from Sum aggregate and Mean of Maximum defuzz
# *the crisp value from Max aggregate and centroid defuzz
# *the crisp value from Max aggregate and Mean of Maximum defuzz
def fuzzyEngine(x_ammo, x_health, x_action, health, ammo):
# This function takes in the ammo x-axis, health x-axis, ammo x-axis
# the health and ammo input
# Then returns the crisp value of the action to be take.
# Creating triangle fuzzy membership functions for the ammo
ammo_vlo = triangleMembershipFunction(x_ammo, 0, 0, 25) # very low ammo
ammo_lo = triangleMembershipFunction(x_ammo, 0, 25, 50) # low ammo
ammo_md = triangleMembershipFunction(x_ammo, 25, 50, 75) # medium ammo
ammo_hi = triangleMembershipFunction(x_ammo, 50, 75, 100) # high ammo
ammo_vhi = triangleMembershipFunction(x_ammo, 75, 100, 100) # very high ammo
# Creating triangle fuzzy membership functions for the health set
health_vlo = triangleMembershipFunction(x_health, 0, 0, 25) # very low health
health_lo = triangleMembershipFunction(x_health, 0, 25, 50) # low health
health_md = triangleMembershipFunction(x_health, 25, 50, 75) # medium health
health_hi = triangleMembershipFunction(x_health, 50, 75, 100) # high health
health_vhi = triangleMembershipFunction(x_health, 75, 100, 100) # very high health
# Creating triangle fuzzy membership functions for the action set
hide = triangleMembershipFunction(x_action, 0, 0, 25)
run = triangleMembershipFunction(x_action, 0, 25, 50)
stop = triangleMembershipFunction(x_action, 25, 50, 75)
walk = triangleMembershipFunction(x_action, 50, 75, 100)
attack = triangleMembershipFunction(x_action, 75, 100, 100)
# Calculating the degree of membership for the input variables
# Degree of memberships for ammo
ammo_level_vlo = Interpolate(x_ammo, ammo_vlo, ammo)
ammo_level_lo = Interpolate(x_ammo, ammo_lo, ammo)
ammo_level_md = Interpolate(x_ammo, ammo_md, ammo)
ammo_level_hi = Interpolate(x_ammo, ammo_hi, ammo)
ammo_level_vhi = Interpolate(x_ammo, ammo_vhi, ammo)
# Degree of memberships for health
health_level_vlo = Interpolate(x_health, health_vlo, health)
health_level_lo = Interpolate(x_health, health_lo, health)
health_level_md = Interpolate(x_health, health_md, health)
health_level_hi = Interpolate(x_health, health_hi, health)
health_level_vhi = Interpolate(x_health, health_vhi, health)
# Applying the rules specified by the article.
# The condition is the AND condition, thus we use the MIN operator
# There are 25 rules in total from the article
rule1 = np.fmin(ammo_level_vlo, health_level_vlo) # ammo very low and health very low => hide
rule2 = np.fmin(ammo_level_vlo, health_level_lo) # ammo very low and health low => hide
rule3 = np.fmin(ammo_level_vlo, health_level_md) # ammo very low and health mid => run away
rule4 = np.fmin(ammo_level_vlo, health_level_hi) # ammo very low and health high => run away
rule5 = np.fmin(ammo_level_vlo, health_level_vhi) # ammo very low and health very high => stop
rule6 = np.fmin(ammo_level_lo, health_level_vlo) # ammo low and health very low => hide
rule7 = np.fmin(ammo_level_lo, health_level_lo) # ammo low and health low => run away
rule8 = np.fmin(ammo_level_lo, health_level_md) # ammo low and health mid => run away
rule9 = np.fmin(ammo_level_lo, health_level_hi) # ammo low and health high => stop
rule10 = np.fmin(ammo_level_lo, health_level_vhi) # ammo low and health very high => walk around
rule11 = np.fmin(ammo_level_md, health_level_vlo) # ammo mid and health very low => run away
rule12 = np.fmin(ammo_level_md, health_level_lo) # ammo mid and health low => run away
rule13 = np.fmin(ammo_level_md, health_level_md) # ammo mid and health mid => stop
rule14 = np.fmin(ammo_level_md, health_level_hi) # ammo mid and health high => walk around
rule15 = np.fmin(ammo_level_md, health_level_vhi) # ammo mid and health very high => walk around
rule16 = np.fmin(ammo_level_hi, health_level_vlo) # ammo high and health very low => run away
rule17 = np.fmin(ammo_level_hi, health_level_lo) # ammo high and health low => stop
rule18 = np.fmin(ammo_level_hi, health_level_md) # ammo high and health mid => walk around
rule19 = np.fmin(ammo_level_hi, health_level_hi) # ammo high and health high => walk around
rule20 = np.fmin(ammo_level_hi, health_level_vhi) # ammo high and health very high => attack
rule21 = np.fmin(ammo_level_vhi, health_level_vlo) # ammo very high and health very low => stop
rule22 = np.fmin(ammo_level_vhi, health_level_lo) # ammo very high and health low => walk around
rule23 = np.fmin(ammo_level_vhi, health_level_md) # ammo very high and health mid => walk around
rule24 = np.fmin(ammo_level_vhi, health_level_hi) # ammo very high and health high => attack
rule25 = np.fmin(ammo_level_vhi, health_level_vhi) # ammo very high and health very high => attack
# Finding the points in the universe activated by the rules
# This is useful for plotting the region on the graph
    # The logic is to clip each output membership function at the activation level (the y-value) of its rule
rule1_area = np.fmin(hide, rule1) # hide zone
rule2_area = np.fmin(hide, rule2) # hide zone
rule3_area = np.fmin(run, rule3) # run away zone
rule4_area = np.fmin(run, rule4) # run zone
rule5_area = np.fmin(stop, rule5) # stop zone
rule6_area = np.fmin(hide, rule6) # hide zone
rule7_area = np.fmin(run, rule7) # run away zone
rule8_area = np.fmin(run, rule8) # run away zone
rule9_area = np.fmin(stop, rule9) # stop zone
rule10_area = np.fmin(walk, rule10) # walk around zone
rule11_area = np.fmin(run, rule11) # run away zone
rule12_area = np.fmin(run, rule12) # run away zone
rule13_area = np.fmin(stop, rule13) # stop zone
rule14_area = np.fmin(walk, rule14) # walk around zone
rule15_area = np.fmin(walk, rule15) # walk around zone
rule16_area = np.fmin(run, rule16) # run away zone
    rule17_area = np.fmin(stop, rule17)  # stop zone
import urllib.request
import tarfile
import zipfile
import os.path
import numpy as np
def download_url(url, save_path):
with urllib.request.urlopen(url) as dl_file:
with open(save_path, 'wb') as out_file:
out_file.write(dl_file.read())
def untar_file(data_dir, file_name):
tar_name = os.path.join(data_dir, file_name)
with tarfile.open(tar_name) as tar:
tar.extractall()
tar.close()
def unzip_file(file_name, data_dir=None):
if (data_dir is None):
data_dir = os.path.dirname(file_name)
with zipfile.ZipFile(file_name, 'r') as zip_ref:
zip_ref.extractall(data_dir)
def edge_list_to_dense(elist, num_vertices = 75):
adj_mat = np.zeros((num_vertices,num_vertices), dtype=np.float)
num_edges = elist.shape[0]
for edge in range(num_edges):
source, sink = elist[edge,:]
source = source.item()
sink = sink.item()
adj_mat[source][sink] = 1.0
adj_mat[sink][source] = 1.0
return adj_mat
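# Hedged usage sketch (toy numbers, not real TU data): a 3-node triangle graph
# embedded into a dense, symmetric 5x5 adjacency matrix; unused vertices stay 0.
def _edge_list_to_dense_example():
    elist = np.array([[0, 1], [1, 2], [2, 0]])
    adj = edge_list_to_dense(elist, num_vertices=5)
    assert adj[0, 1] == adj[1, 0] == 1.0
    return adj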
########################################################
#
# TU Dataset specific functions
#
########################################################
def extract_node_features(node_slices, node_labels, max_nodes, num_classes = None):
node_label_list = []
for i, ind in enumerate(node_slices[1:]):
if num_classes:
graph_x = np.eye(num_classes)[np.asarray([int(x) for x in node_labels[node_slices[i]:ind]],dtype=np.int)]
else:
            graph_x = np.asarray([int(x) for x in node_labels[node_slices[i]:ind]], dtype=np.int)
if (len(graph_x) < max_nodes):
pad = max_nodes - len(graph_x)
graph_x = np.pad(graph_x, ((0,pad),(0,0)), 'constant')
node_label_list.append(graph_x)
return node_label_list
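# Hedged example (tiny hand-made inputs, not real TU data): two graphs with 2
# and 3 nodes and node labels in {0, 1, 2}; each graph is one-hot encoded and
# zero-padded up to max_nodes rows.
def _extract_node_features_example():
    node_slices = [0, 2, 5]
    node_labels = ['0', '2', '1', '1', '0']
    feats = extract_node_features(node_slices, node_labels, max_nodes=4, num_classes=3)
    assert all(f.shape == (4, 3) for f in feats)
    return feats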
def extract_adj_mat(node_slices, edge_list, max_nodes):
adj_mat_list = []
removed_graphs = []
for i, max_node_id in enumerate(node_slices[1:]):
min_node_id = node_slices[i]
num_nodes = max_node_id - min_node_id
if (num_nodes < max_nodes):
edges = edge_list[(edge_list[:,1] > min_node_id) & (edge_list[:,1] < max_node_id)]
edges = edges -1 - min_node_id
adj_mat = edge_list_to_dense(edges, max_nodes)
adj_mat_list.append(adj_mat)
else:
removed_graphs.append(i)
return adj_mat_list, removed_graphs
def extract_targets(graph_labels, num_classes, removed_graphs):
graph_labels = np.array([int(x) for x in graph_labels])
    labels = np.eye(num_classes)[graph_labels]
import logging
import numpy as np
import scipy as sp
import collections
import torch
import functools
from numpy.lib.stride_tricks import as_strided
from sklearn.utils.extmath import randomized_svd
from sklearn.utils import check_random_state
logging.basicConfig()
def form_lag_matrix(X, T, stride=1, stride_tricks=True, rng=None, writeable=False):
"""Form the data matrix with `T` lags.
Parameters
----------
X : ndarray (n_time, N)
Timeseries with no lags.
T : int
Number of lags.
stride : int or float
If stride is an `int`, it defines the stride between lagged samples used
to estimate the cross covariance matrix. Setting stride > 1 can speed up the
calculation, but may lead to a loss in accuracy. Setting stride to a `float`
greater than 0 and less than 1 will random subselect samples.
rng : NumPy random state
Only used if `stride` is a float.
stride_tricks : bool
Whether to use numpy stride tricks to form the lagged matrix or create
        a new array. Using numpy stride tricks can lower memory usage, especially for
large `T`. If `False`, a new array is created.
writeable : bool
For testing. You should not need to set this to True. This function uses stride tricks
to form the lag matrix which means writing to the array will have confusing behavior.
If `stride_tricks` is `False`, this flag does nothing.
Returns
-------
X_with_lags : ndarray (n_lagged_time, N * T)
Timeseries with lags.
"""
if not isinstance(stride, int) or stride < 1:
if not isinstance(stride, float) or stride <= 0. or stride >= 1.:
raise ValueError('stride should be an int and greater than or equal to 1 or a float ' +
'between 0 and 1.')
N = X.shape[1]
frac = None
if isinstance(stride, float):
frac = stride
stride = 1
n_lagged_samples = (len(X) - T) // stride + 1
if n_lagged_samples < 1:
raise ValueError('T is too long for a timeseries of length {}.'.format(len(X)))
if stride_tricks:
    X = np.asarray(X, dtype=float, order='C')
import os, time
from typing import Dict, Tuple, Callable, Union, Optional, List
import ctypes as ct
from pathlib import Path
from functools import partial
import pandas as pd
import numpy as np
import numba as nb
from astropy import units as un
from astropy.constants import sigma_T, m_p, c
sigma_T = sigma_T.to(un.Mpc**2).value #[Mpc^2]
m_p = m_p.to(un.M_sun).value #[M_sun]
c_light = c.to("km/s").value
T_cmb = 2.7251 #[K]
Gcm2 = 4.785E-20 # G/c^2 (Mpc/M_sun)
c_lib_path = Path(__file__).parent.absolute()
class SkyNumbaUtils:
@nb.jit(nopython=True)
def convert_convergence_to_deflection_numba(
kappa: np.ndarray,
npix: int,
opening_angle: float,
padding_factor: int = 4,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Args:
kappa:
Convergence map
opening_angle:
Edge length of field-of-view [angular distance]
npix:
Number of pixels along edge of field-of-view
Returns:
alpha1,2:
deflection angle in units of opening_angle
"""
xlen, ylen = kappa.shape
        xpad, ypad = xlen * padding_factor, ylen * padding_factor
# Array of 2D dimensionless coordinates
xsgrid, ysgrid = _make_r_coor(opening_angle, xlen)
x = np.zeros((xlen, xlen, 2))
x[:, :, 0] = xsgrid
x[:, :, 1] = ysgrid
Lx = x[-1, 0, 0] - x[0, 0, 0]
Ly = x[0, -1, 1] - x[0, 0, 1]
# round to power of 2 to speed up FFT
xpad = np.int(2 ** (np.ceil(np.log2(xpad))))
ypad = np.int(2 ** (np.ceil(np.log2(ypad))))
kappa_ft = np.fft.fft2(kappa, s=[xpad, ypad])
Lxpad = Lx * xpad / xlen
Lypad = Ly * ypad / ylen
# make a k-space grid
kxgrid, kygrid = np.meshgrid(
np.fft.fftfreq(xpad), np.fft.fftfreq(ypad), indexing="ij"
)
kxgrid *= 2 * np.pi * xpad / Lxpad
kygrid *= 2 * np.pi * ypad / Lypad
alphaX_kfac = 2j * kxgrid / (kxgrid ** 2 + kygrid ** 2)
alphaY_kfac = 2j * kygrid / (kxgrid ** 2 + kygrid ** 2)
# [0,0] component mucked up by dividing by k^2
alphaX_kfac[0, 0], alphaY_kfac[0, 0] = 0, 0
alphaX_ft = alphaX_kfac * kappa_ft
alphaY_ft = alphaY_kfac * kappa_ft
alphaX = np.fft.ifft2(alphaX_ft)[:xlen, :ylen]
alphaY = np.fft.ifft2(alphaY_ft)[:xlen, :ylen]
alpha = np.zeros(x.shape)
alpha[:, :, 0] = alphaX
alpha[:, :, 1] = alphaY
return -alpha # minus sign correction
class SkyUtils:
def analytic_Halo_signal_to_SkyArray(
halo_idx: np.array,
halo_cat: Dict[str, list],
extent: int,
direction: list,
suppress: bool,
suppression_R: float,
npix: int,
signal: str,
) -> np.ndarray:
map_array = np.zeros((npix, npix))
# filter out indicis from halo catalogue
for key, val in halo_cat.items():
halo_cat[key] = np.asarray(val)[halo_idx]
if signal == "dT":
partial_method= partial(
SkyUtils.NFW_temperature_perturbation_map,
extent=extent,
direction=direction,
suppress=suppress,
suppression_R=suppression_R,
)
for idx in range(len(halo_idx)):
map_halo = partial_method(
halo_cat["r200_deg"][idx],
halo_cat["m200"][idx],
halo_cat["c_NFW"][idx],
[halo_cat["theta1_tv"][idx], halo_cat["theta2_tv"][idx]],
halo_cat["Dc"][idx] * 0.6774,
npix = int(2 * halo_cat["r200_pix"][idx] * extent) + 1,
)
map_array = SkyUtils.add_patch_to_map(
map_array,
map_halo,
(halo_cat["theta1_pix"][idx], halo_cat["theta2_pix"][idx]),
)
elif signal == "alpha":
partial_method= partial(
SkyUtils.NFW_deflection_angle_map,
extent=extent,
direction=direction,
suppress=suppress,
suppression_R=suppression_R,
)
for idx in range(len(halo_idx)):
map_halo = partial_method(
halo_cat["r200_deg"][idx],
halo_cat["m200"][idx],
halo_cat["c_NFW"][idx],
halo_cat["Dc"][idx] * 0.6774,
npix = int(2 * halo_cat["r200_pix"][idx] * extent) + 1,
)
map_array = SkyUtils.add_patch_to_map(
map_array,
map_halo,
(halo_cat["theta1_pix"][idx], halo_cat["theta2_pix"][idx]),
)
return map_array
def add_patch_to_map(
limg: np.ndarray, simg: np.ndarray, cen_pix: tuple,
    ) -> np.ndarray:
"""
Add small image (simg) onto large image (limg) such that simg does
not shoot over the boundary of limg.
Args:
            limg: large image onto which the small image will be added.
            simg: small image (patch) to add; it needs an uneven number of
                pixels per side.
            cen_pix: (x,y)-coordinates of the centre of the small image.
        Returns:
            limg: the large image with the (clipped) small image added onto it;
                the pasted patch is guaranteed not to exceed the boundary.
"""
#assert limg.flags['C_CONTIGUOUS']
#assert simg.flags['C_CONTIGUOUS']
rad = int(len(simg)/2)
xedges = np.arange(cen_pix[0] - rad, cen_pix[0] + rad + 1)
yedges = np.arange(cen_pix[1] - rad, cen_pix[1] + rad + 1)
x_pix, y_pix = np.meshgrid(xedges, yedges)
mask = np.logical_and(
np.logical_and(0 <= x_pix, x_pix < len(limg)),
np.logical_and(0 <= y_pix, y_pix < len(limg)),
)
x_bool = np.sum(mask, axis=0) > 0
y_bool = np.sum(mask, axis=1) > 0
simg = simg[mask].reshape((np.sum(y_bool), np.sum(x_bool)))
xlim = np.array([xedges[x_bool].min(), xedges[x_bool].max()+1]).astype(int)
ylim = np.array([yedges[y_bool].min(), yedges[y_bool].max()+1]).astype(int)
limg[ylim[0]:ylim[1], xlim[0]:xlim[1]] += simg
return limg
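    # Hedged usage sketch (toy numbers, added for illustration): paste a 3x3
    # patch of ones near the upper-left corner of a 10x10 map; the part of the
    # patch that would fall outside the map is clipped away.
    def _add_patch_to_map_example():
        large = np.zeros((10, 10))
        small = np.ones((3, 3))
        return SkyUtils.add_patch_to_map(large, small, cen_pix=(0, 0))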
def NFW_temperature_perturbation_map(
theta_200c: float,
M_200c: float,
c_200c: float,
vel: Union[list, tuple, np.ndarray],
angu_diam_dist: float,
npix: int = 100,
extent: float = 1,
direction: List[int] = [0, 1],
suppress: bool = False,
suppression_R: float = 1,
) -> np.ndarray:
"""
The Rees-Sciama / Birkinshaw-Gull / moving cluster of galaxies effect.
Args:
vel: transverse to the line-of-sight velocity, [km/sec]
Returns:
Temperature perturbation map, \Delta T / T_CMB, [-]
"""
dt_map = np.zeros((npix, npix))
for direc in direction:
alpha_map = SkyUtils.NFW_deflection_angle_map(
theta_200c,
M_200c,
c_200c,
angu_diam_dist,
npix,
extent,
[direc],
suppress,
suppression_R,
)
dt_map += - alpha_map * vel[direc] / c_light
return dt_map
def NFW_deflection_angle_map(
theta_200c: float,
M_200c: float,
c_200c: float,
angu_diam_dist: float,
npix: int = 100,
extent: float = 1,
direction: List[int] = [0],
suppress: bool = False,
suppression_R: float = 1,
) -> np.ndarray:
"""
Calculate the deflection angle of a halo with NFW profile using method
in described Sec. 3.2 in Baxter et al 2015 (1412.7521).
Note:
In this application it can be assumed that s_{SL}/s_{S}=1. Furthermore,
            we can neglect vec{theta}/norm{theta} as it will be multiplied by the
            same in the integral of Eq. 9 in Yasini et al. 2018 (1812.04241).
Args:
theta_200c: radius, [deg]
M_200c: mass, [Msun]
c_200c: concentration, [-]
extent: The size of the map from which the trans-vel is calculated
in units of R200 of the associated halo.
suppress:
suppression_R:
angu_diam_dist: angular diameter distance, [Mpc]
direction: 0=(along x-axis), 1=(along y-axis)
Returns:
Deflection angle map.
"""
assert np.sum(direction) <= 1, "Only 0 and 1 are valid direction indications."
R_200c = np.tan(theta_200c * np.pi / 180) * angu_diam_dist # [Mpc]
edges = np.linspace(0, 2*R_200c*extent, npix) - R_200c * extent
thetax, thetay = np.meshgrid(edges, edges)
R = np.sqrt(thetax ** 2 + thetay ** 2) # distances to pixels
# Eq. 8
A = M_200c * c_200c ** 2 / (np.log(1 + c_200c) - c_200c / (1 + c_200c)) / 4. / np.pi
# constant in Eq. 6
C = 16 * np.pi * Gcm2 * A / c_200c / R_200c
R_s = R_200c / c_200c
x = R / R_s
x = x.astype(np.complex)
# Eq. 7
f = np.true_divide(1, x) * (
np.log(x / 2) + 2 / np.sqrt(1 - x ** 2) * \
np.arctanh(np.sqrt(np.true_divide(1 - x, 1 + x)))
)
alpha_map = np.zeros((npix, npix)).astype(np.complex)
for direc in direction:
# Eq. 6
if direc == 0:
thetax_hat = np.true_divide(thetax, R)
alpha_map += C * thetax_hat * f
elif direc == 1:
thetay_hat = np.true_divide(thetay, R)
alpha_map += C * thetay_hat * f
alpha_map = np.nan_to_num(alpha_map, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
if suppress: # suppress alpha at large radii
suppress_radius = suppression_R * R_200c
alpha_map *= np.exp(-(R / suppress_radius) ** 3)
alpha_map = alpha_map.real
alpha_map[abs(alpha_map) > 100] = 0. # remove unphysical results
return alpha_map
#@LOS_integrate
#def rho_2D(R, rho_s, R_s):
# """
# 3D NFW profile intgrated along the line of sight
# Returns
# -------
# surface mass density: [M_sun/Mpc^2]
# """
# return rho_3D(R, rho_s, R_s)
#def LOS_integrate(profile_3D, *args, **kwargs):
# """integrate along the line of sight for all 3D r's that correspond to the 2D R"""
# # extract R
# R = args[0]
# args = args[1:]
# #TODO: Add support for R_vec too
# # see if R is a scalar or a list (array)
# R_is_scalar = False
# if not hasattr(R, "__iter__"):
# R_is_scalar = True
# R = [R]
# # integrate along LOS for each R
# LOS_integrated = []
# for R_i in R:
# #TODO: Take f outside and profile the funtion
# f = lambda r: profile_3D(r, *args, **kwargs) * 2. * r / np.sqrt(r ** 2 - R_i ** 2)
# LOS_integrated.append(integrate.quad(f, R_i, np.inf, epsabs=0., epsrel=1.e-2)[0])
# # if R was a scalar, return a scalar
# if R_is_scalar:
# LOS_integrated = LOS_integrated[0]
# return np.asarray(LOS_integrated)
def convert_code_to_phy_units(
quantity: str, map_df: pd.DataFrame
) -> pd.DataFrame:
"""
Convert from RayRamses code units to physical units.
Args:
quantity:
map_df:
Returns:
Dimensionless values converted from code to physical units.
shear_: [-]
deflt_: double check
kappa_: [-]
isw_rs: \Delta T / T_cmb [-]
"""
if quantity in ["shear_x", "shear_y", "deflt_x", "deflt_y", "kappa_2"]:
map_df.loc[:, [quantity]] /= c_light ** 2
elif quantity in ["isw_rs"]:
map_df.loc[:, [quantity]] /= c_light ** 3
return map_df
def convert_deflection_to_shear(
alpha1: np.ndarray, alpha2: np.ndarray, npix: int, opening_angle: float
) -> Tuple[np.ndarray, np.ndarray]:
"""
Args:
alpha1,2:
opening_angle:
Edge length of field-of-view [angular distance]
npix:
Number of pixels along edge of field-of-view
Returns:
gamma1,2: shear map
"""
# TODO
al11 = 1 - np.gradient(alpha1, coord, axis=0)
al12 = -np.gradient(alpha1, coord, axis=1)
al21 = -np.gradient(alpha2, coord, axis=0)
al22 = 1 - np.gradient(alpha2, coord, axis=1)
shear1 = 0.5 * (al11 - al22)
shear2 = 0.5 * (al21 + al12)
return shear1, shear2
def convert_convergence_to_deflection_ctypes(
kappa: np.ndarray, npix: int, opening_angle: float
) -> Tuple[np.ndarray, np.ndarray]:
"""
Args:
kappa:
Convergence map
opening_angle:
Edge length of field-of-view [angular distance]
npix:
Number of pixels along edge of field-of-view
Returns:
alpha1,2:
deflection angle in units of opening_angle
"""
alpha1, alpha2 = _call_alphas(
kappa, npix, (opening_angle).to(un.rad).value
)
return alpha1, alpha2
def _make_r_coor(
opening_angle: float, npix: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
""" Returns pixel coordinates """
ds = opening_angle / npix
x1edge = (
np.linspace(0, opening_angle - ds, npix)
- opening_angle / 2.0
+ ds / 2.0
)
x2, x1 = np.meshgrid(x1edge, x1edge)
return x1, x2, x1edge
gls = ct.CDLL(c_lib_path / "lib_so_cgls/libglsg.so")
gls.kappa0_to_alphas.argtypes = [
np.ctypeslib.ndpointer(dtype=ct.c_double),
ct.c_int,
ct.c_double,
np.ctypeslib.ndpointer(dtype=ct.c_double),
np.ctypeslib.ndpointer(dtype=ct.c_double),
]
gls.kappa0_to_alphas.restype = ct.c_void_p
def _call_alphas(
kappa: np.ndarray, npix: int, opening_angle: float
) -> Tuple[np.ndarray, np.ndarray]:
_kappa = np.array(kappa, dtype=ct.c_double)
alpha1 = np.array(np.zeros((npix, npix)), dtype=ct.c_double)
    alpha2 = np.array(np.zeros((npix, npix)), dtype=ct.c_double)
# this tells python to act as if though We are one folder up
import sys
sys.path.insert(0,'..')
import pandas as pd
import FixedEffectModelPyHDFE.api as FEM
from FixedEffectModelPyHDFE.DemeanDataframe import get_np_columns
#import FixedEffectModel.api as FEM
import numpy as np
from patsy import dmatrices
import statsmodels.formula.api as smf
import statsmodels.api as sm
from fastreg import linear
from datetime import datetime
import unittest
from math import isclose
NLS_WORK = "./../data/test_dropped_na.dta"
CEREAL = "./../data/cereal.dta"
AUTO = "./../data/auto_drop_na.dta"
TOLERANCE = 0.01
class FixedEffectsModelTestsVSfastreg(unittest.TestCase):
def setup(self, data_directory, target, regressors, absorb, cluster):
print(self._testMethodName)
print("target: ", target)
print("regressors: ", regressors)
print("absorb: ", absorb)
print("cluster: ", cluster)
df = pd.read_stata(data_directory)
df.reset_index(drop=True, inplace=True)
fem_start = datetime.now()
self.result = FEM.ols_high_d_category(df,
regressors,
target,
absorb,
cluster,
formula=None,
robust=False,
epsilon = 1e-8,
max_iter = 1e6)
fem_end = datetime.now()
print("FEM time taken: " + str(fem_end-fem_start))
self.result.summary()
print()
if absorb[0] == '0':
absorb=None
fastreg_start = datetime.now()
fastreg = linear.ols(y=target[0],
x=regressors,
absorb=absorb,
cluster=cluster,
data=df)
fastreg_end = datetime.now()
print(fastreg)
print("fastreg time taken: " + str(fastreg_end - fastreg_start))
print("\n\n\n\n\n")
#########################################################################
#########################################################################
def test_just_absorb_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['idcode', 'birth_yr', 'fifty_clusts', 'sixty_clusts'],
cluster=[])
def test_no_absorb_cluster_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['0'],
cluster=['idcode', 'birth_yr', 'fifty_clusts', 'sixty_clusts'])
# comparing fvalue
def test_clustering_single_variable_no_absorb2_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['0'],
cluster=['race'])
# comparing fvalue
assert(np.isclose(self.result.fvalue, 127593.72, atol=TOLERANCE))
# comparing standard errors
assert(np.all(np.isclose(self.result.bse, np.asarray([.148934, .0065111, .0113615]), atol=TOLERANCE)))
# comparing tvalues
assert(np.all(np.isclose(self.result.tvalues, np.asarray([27.75, 2.32, 66.61]), atol=TOLERANCE)))
def test_clustering_single_variable_no_absorb_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['0'],
cluster=['fifty_clusts'])
assert(np.isclose(self.result.fvalue, 10230.63, atol=TOLERANCE))
assert(np.all(np.isclose(self.result.bse, np.asarray([.048274, .0044294, .0052923]), atol=TOLERANCE)))
assert(np.all(np.isclose(self.result.tvalues, np.asarray([85.60, 3.42, 143.00]), atol=TOLERANCE)))
def test_clustering_two_variables_no_absorb_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['0'],
cluster=['fifty_clusts', 'sixty_clusts'])
assert(np.isclose(self.result.fvalue, 12347.24, atol=TOLERANCE))
assert(np.all(np.isclose(self.result.bse, np.asarray([.0518019, .0048228, .00492]), atol=TOLERANCE)))
assert(np.all(np.isclose(self.result.tvalues, np.asarray([79.77, 3.14, 153.82]), atol=TOLERANCE)))
def test_clustering_many_variables_no_absorb_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['0'],
cluster=['fifty_clusts', 'sixty_clusts', 'birth_yr', 'idcode'])
assert(np.isclose(self.result.fvalue, 4664.62, atol=TOLERANCE))
assert(np.all(np.isclose(self.result.bse, np.asarray([.0551555, .0080815, .007881]), atol=TOLERANCE)))
assert(np.all(np.isclose(self.result.tvalues, np.asarray([74.92, 1.87, 96.03]), atol=TOLERANCE)))
def test_just_absorb_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['fifty_clusts', 'sixty_clusts', 'birth_yr', 'idcode'],
cluster=[])
assert(np.isclose(self.result.fvalue, 3891.51, atol=TOLERANCE))
assert(np.all(np.isclose(self.result.bse, np.asarray([.0047052, .0096448]), atol=TOLERANCE)))
assert(np.all(np.isclose(self.result.tvalues, np.asarray([6.48, 88.22]), atol=TOLERANCE)))
def test_cluster_1_absorb_1_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['fifty_clusts'],
cluster=['sixty_clusts'])
assert(np.isclose(self.result.fvalue, 9884.24, atol=TOLERANCE))
assert(np.all(np.isclose(self.result.bse, np.asarray([.004654, .0055812]), atol=TOLERANCE)))
assert(np.all(np.isclose(self.result.tvalues, np.asarray([3.18, 135.54]), atol=TOLERANCE)))
def test_cluster_1_absorb_1_2_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['fifty_clusts'],
cluster=['fifty_clusts'])
assert(np.isclose(self.result.fvalue, 10100.50, atol=TOLERANCE))
assert(np.all(np.isclose(self.result.bse, np.asarray([.0044538, .005324]), atol=TOLERANCE)))
assert(np.all(np.isclose(self.result.tvalues, np.asarray([3.33, 142.09]), atol=TOLERANCE)))
def test_cluster_many_absorb_1_nls_work_dataset(self):
self.setup(NLS_WORK,
target=['ttl_exp'],
regressors=['wks_ue', 'tenure'],
absorb=['fifty_clusts'],
cluster=['fifty_clusts', 'sixty_clusts', 'idcode', 'year'])
        assert(np.isclose(self.result.fvalue, 86.89, atol=TOLERANCE))
# -*- coding: utf-8 -*-
"""
A class for working with a collection of spaCy docs. Includes functionality for
easily adding, getting, and removing documents; saving to / loading their data
from disk; and tracking basic corpus statistics.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import itertools
import logging
import math
import numpy as np
import spacy
import srsly
from cytoolz import itertoolz
from thinc.neural.ops import NumpyOps
from . import cache
from . import compat
from . import io as tio
from . import utils
LOGGER = logging.getLogger(__name__)
class Corpus(object):
"""
An ordered collection of :class:`spacy.tokens.Doc`, all of the same language
and sharing the same :class:`spacy.language.Language` processing pipeline
and vocabulary, with data held *in-memory*.
Initialize from a language / ``Language`` and (optionally) one or a stream
of texts or (text, metadata) pairs:
.. code-block:: pycon
>>> ds = textacy.datasets.CapitolWords()
>>> records = ds.records(limit=50)
>>> corpus = textacy.Corpus("en", data=records)
>>> corpus
Corpus(50 docs, 32175 tokens)
Add or remove documents, with automatic updating of corpus statistics:
.. code-block:: pycon
>>> texts = ds.texts(congress=114, limit=25)
>>> corpus.add(texts)
>>> corpus.add("If Burton were a member of Congress, here's what he'd say.")
>>> corpus
Corpus(76 docs, 55906 tokens)
>>> corpus.remove(lambda doc: doc._.meta.get("speaker_name") == "<NAME>")
>>> corpus
Corpus(61 docs, 48567 tokens)
Get subsets of documents matching your particular use case:
.. code-block:: pycon
>>> match_func = lambda doc: doc._.meta.get("speaker_name") == "<NAME>"
>>> for doc in corpus.get(match_func, limit=3):
... print(doc._.preview)
Doc(159 tokens: "Mr. Speaker, 480,000 Federal employees are work...")
Doc(336 tokens: "Mr. Speaker, I thank the gentleman for yielding...")
Doc(177 tokens: "Mr. Speaker, if we want to understand why in th...")
Get or remove documents by indexing, too:
.. code-block:: pycon
>>> corpus[0]._.preview
'Doc(159 tokens: "Mr. Speaker, 480,000 Federal employees are work...")'
>>> [doc._.preview for doc in corpus[:3]]
['Doc(159 tokens: "Mr. Speaker, 480,000 Federal employees are work...")',
'Doc(219 tokens: "Mr. Speaker, a relationship, to work and surviv...")',
'Doc(336 tokens: "Mr. Speaker, I thank the gentleman for yielding...")']
>>> del corpus[:5]
>>> corpus
Corpus(56 docs, 41573 tokens)
Compute basic corpus statistics:
.. code-block:: pycon
>>> corpus.n_docs, corpus.n_sents, corpus.n_tokens
(56, 1771, 41573)
>>> word_counts = corpus.word_counts(as_strings=True)
>>> sorted(word_counts.items(), key=lambda x: x[1], reverse=True)[:5]
[('-PRON-', 2553), ('people', 215), ('year', 148), ('Mr.', 139), ('$', 137)]
>>> word_doc_counts = corpus.word_doc_counts(weighting="freq", as_strings=True)
>>> sorted(word_doc_counts.items(), key=lambda x: x[1], reverse=True)[:5]
[('-PRON-', 0.9821428571428571),
('Mr.', 0.7678571428571429),
('President', 0.5),
('people', 0.48214285714285715),
('need', 0.44642857142857145)]
Save corpus data to and load from disk:
.. code-block:: pycon
>>> corpus.save("~/Desktop/capitol_words_sample.bin.gz")
>>> corpus = textacy.Corpus.load("en", "~/Desktop/capitol_words_sample.bin.gz")
>>> corpus
Corpus(56 docs, 41573 tokens)
Args:
lang (str or :class:`spacy.language.Language`):
Language with which spaCy processes (or processed) all documents
added to the corpus, whether as ``data`` now or later.
Pass a standard 2-letter language code (e.g. "en"),
or the name of a spacy language pipeline (e.g. "en_core_web_md"),
or an already-instantiated :class:`spacy.language.Language` object.
A given / detected language string is then used to instantiate
a corresponding ``Language`` with all default components enabled.
data (obj or Iterable[obj]): One or a stream of texts, records,
or :class:`spacy.tokens.Doc` s to be added to the corpus.
.. seealso:: :meth:`Corpus.add()`
Attributes:
lang (str)
spacy_lang (:class:`spacy.language.Language`)
docs (List[:class:`spacy.tokens.Doc`])
n_docs (int)
n_sents (int)
n_tokens (int)
"""
def __init__(self, lang, data=None):
self.spacy_lang = _get_spacy_lang(lang)
self.lang = self.spacy_lang.lang
self.docs = []
self._doc_ids = []
self.n_docs = 0
self.n_sents = 0
self.n_tokens = 0
if data:
self.add(data)
# dunder
def __repr__(self):
return "Corpus({} docs, {} tokens)".format(self.n_docs, self.n_tokens)
def __len__(self):
return self.n_docs
def __iter__(self):
for doc in self.docs:
yield doc
def __contains__(self, doc):
return id(doc) in self._doc_ids
def __getitem__(self, idx_or_slice):
return self.docs[idx_or_slice]
def __delitem__(self, idx_or_slice):
if isinstance(idx_or_slice, int):
self._remove_one_doc_by_index(idx_or_slice)
elif isinstance(idx_or_slice, slice):
start, end, step = idx_or_slice.indices(self.n_docs)
idxs = compat.range_(start, end, step)
self._remove_many_docs_by_index(idxs)
else:
raise TypeError(
"list indices must be integers or slices, not {}".format(type(idx_or_slice))
)
# add documents
def add(self, data, batch_size=1000):
"""
Add one or a stream of texts, records, or :class:`spacy.tokens.Doc` s
to the corpus, ensuring that all processing is or has already been done
by the :attr:`Corpus.spacy_lang` pipeline.
Args:
data (obj or Iterable[obj]):
str or Iterable[str]
Tuple[str, dict] or Iterable[Tuple[str, dict]]
:class:`spacy.tokens.Doc` or Iterable[:class:`spacy.tokens.Doc`]
batch_size (int)
See Also:
* :meth:`Corpus.add_text()`
* :meth:`Corpus.add_texts()`
* :meth:`Corpus.add_record()`
* :meth:`Corpus.add_records()`
* :meth:`Corpus.add_doc()`
* :meth:`Corpus.add_docs()`
"""
if isinstance(data, compat.unicode_):
self.add_text(data)
elif isinstance(data, spacy.tokens.Doc):
self.add_doc(data)
elif utils.is_record(data):
self.add_record(data)
elif isinstance(data, compat.Iterable):
first, data = itertoolz.peek(data)
if isinstance(first, compat.unicode_):
self.add_texts(data, batch_size=batch_size)
elif isinstance(first, spacy.tokens.Doc):
self.add_docs(data)
elif utils.is_record(first):
self.add_records(data, batch_size=batch_size)
else:
                raise TypeError(
                    "data must be one of {} or an iterable thereof, not {}".format(
{compat.unicode_, spacy.tokens.Doc, tuple},
type(data),
)
)
else:
            raise TypeError(
                "data must be one of {} or an iterable thereof, not {}".format(
{compat.unicode_, spacy.tokens.Doc, tuple},
type(data),
)
)
def add_text(self, text):
"""
Add one text to the corpus, processing it into a :class:`spacy.tokens.Doc`
using the :attr:`Corpus.spacy_lang` pipeline.
Args:
text (str)
"""
self._add_valid_doc(self.spacy_lang(text))
def add_texts(self, texts, batch_size=1000):
"""
Add a stream of texts to the corpus, efficiently processing them into
:class:`spacy.tokens.Doc` s using the :attr:`Corpus.spacy_lang` pipeline.
Args:
texts (Iterable[str])
batch_size (int)
"""
for doc in self.spacy_lang.pipe(texts, as_tuples=False, batch_size=batch_size):
self._add_valid_doc(doc)
def add_record(self, record):
"""
Add one record to the corpus, processing it into a :class:`spacy.tokens.Doc`
using the :attr:`Corpus.spacy_lang` pipeline.
Args:
record (Tuple[str, dict])
"""
doc = self.spacy_lang(record[0])
doc._.meta = record[1]
self._add_valid_doc(doc)
def add_records(self, records, batch_size=1000):
"""
Add a stream of records to the corpus, efficiently processing them into
:class:`spacy.tokens.Doc` s using the :attr:`Corpus.spacy_lang` pipeline.
Args:
records (Iterable[Tuple[str, dict]])
batch_size (int)
"""
for doc, meta in self.spacy_lang.pipe(records, as_tuples=True, batch_size=batch_size):
doc._.meta = meta
self._add_valid_doc(doc)
def add_doc(self, doc):
"""
Add one :class:`spacy.tokens.Doc` to the corpus, provided it was processed
using the :attr:`Corpus.spacy_lang` pipeline.
Args:
doc (:class:`spacy.tokens.Doc`)
"""
if not isinstance(doc, spacy.tokens.Doc):
raise TypeError(
"doc must be a {}, not {}".format(spacy.tokens.Doc, type(doc))
)
if doc.vocab is not self.spacy_lang.vocab:
raise ValueError(
"doc.vocab ({}) must be the same as corpus.vocab ({})".format(
doc.vocab, self.spacy_lang.vocab,
)
)
self._add_valid_doc(doc)
def add_docs(self, docs):
"""
Add a stream of :class:`spacy.tokens.Doc` s to the corpus, provided
they were processed using the :attr:`Corpus.spacy_lang` pipeline.
Args:
doc (Iterable[:class:`spacy.tokens.Doc`])
"""
for doc in docs:
self.add_doc(doc)
def _add_valid_doc(self, doc):
self.docs.append(doc)
self._doc_ids.append(id(doc))
self.n_docs += 1
self.n_tokens += len(doc)
if doc.is_sentenced:
self.n_sents += itertoolz.count(doc.sents)
# get documents
def get(self, match_func, limit=None):
"""
Get all (or N <= ``limit``) docs in :class:`Corpus` for which
``match_func(doc)`` is True.
Args:
match_func (Callable): Function that takes a :class:`spacy.tokens.Doc`
as input and returns a boolean value. For example::
Corpus.get(lambda x: len(x) >= 100)
gets all docs with at least 100 tokens. And::
Corpus.get(lambda doc: doc._.meta["author"] == "<NAME>")
gets all docs whose author was given as '<NAME>'.
limit (int): Maximum number of matched docs to return.
Yields:
:class:`spacy.tokens.Doc`: Next document passing ``match_func``.
.. tip:: To get doc(s) by index, treat :class:`Corpus` as a list and use
Python's usual indexing and slicing: ``Corpus[0]`` gets the first
document in the corpus; ``Corpus[:5]`` gets the first 5; etc.
"""
matched_docs = (doc for doc in self if match_func(doc) is True)
for doc in itertools.islice(matched_docs, limit):
yield doc
# remove documents
def remove(self, match_func, limit=None):
"""
Remove all (or N <= ``limit``) docs in :class:`Corpus` for which
``match_func(doc)`` is True. Corpus doc/sent/token counts are adjusted
accordingly.
Args:
match_func (func): Function that takes a :class:`spacy.tokens.Doc`
and returns a boolean value. For example::
Corpus.remove(lambda x: len(x) >= 100)
removes docs with at least 100 tokens. And::
Corpus.remove(lambda doc: doc._.meta["author"] == "<NAME>")
removes docs whose author was given as "<NAME>".
limit (int): Maximum number of matched docs to remove.
.. tip:: To remove doc(s) by index, treat :class:`Corpus` as a list and use
Python's usual indexing and slicing: ``del Corpus[0]`` removes the
first document in the corpus; ``del Corpus[:5]`` removes the first
5; etc.
"""
matched_docs = (doc for doc in self if match_func(doc) is True)
self._remove_many_docs_by_index(
self._doc_ids.index(id(doc))
for doc in itertools.islice(matched_docs, limit)
)
def _remove_many_docs_by_index(self, idxs):
for idx in sorted(idxs, reverse=True):
self._remove_one_doc_by_index(idx)
def _remove_one_doc_by_index(self, idx):
doc = self.docs[idx]
self.n_docs -= 1
self.n_tokens -= len(doc)
if doc.is_sentenced:
self.n_sents -= itertoolz.count(doc.sents)
del self.docs[idx]
del self._doc_ids[idx]
# useful properties
@property
def vectors(self):
"""Constituent docs' word vectors stacked in a 2d array."""
        return np.vstack([doc.vector for doc in self])
@property
def vector_norms(self):
"""Constituent docs' L2-normalized word vectors stacked in a 2d array."""
        return np.vstack([doc.vector_norm for doc in self])
# useful methods
def word_counts(self, normalize="lemma", weighting="count", as_strings=False,
filter_stops=True, filter_punct=True, filter_nums=False):
"""
Map the set of unique words in :class:`Corpus` to their counts as
        absolute, relative, or binary frequencies of occurrence, similar to
:meth:`Doc._.to_bag_of_words() <textacy.spacier.doc_extensions.to_bag_of_words>`
but aggregated over all docs.
Args:
normalize (str): If "lemma", lemmatize words before counting; if
"lower", lowercase words before counting; otherwise, words are
counted using the form with which they appear.
weighting ({"count", "freq"}): Type of weight to assign to words.
If "count" (default), weights are the absolute number of
occurrences (count) of word in corpus.
If "freq", word counts are normalized by the total token count,
giving their relative frequencies of occurrence.
.. note:: The resulting set of frequencies won't (necessarily) sum
to 1.0, since punctuation and stop words are filtered out after
counts are normalized.
as_strings (bool): If True, words are returned as strings; if False
(default), words are returned as their unique integer ids.
filter_stops (bool): If True (default), stop word counts are removed.
filter_punct (bool): If True (default), punctuation counts are removed.
filter_nums (bool): If True, number counts are removed.
Returns:
dict: mapping of a unique word id or string (depending on the value
of ``as_strings``) to its absolute, relative, or binary frequency
of occurrence (depending on the value of ``weighting``).
See Also:
* :func:`textacy.vsm.get_term_freqs() <textacy.vsm.matrix_utils.get_term_freqs>`
"""
word_counts_ = collections.Counter()
for doc in self:
word_counts_.update(
doc._.to_bag_of_words(
normalize=normalize, weighting="count", as_strings=as_strings,
filter_stops=filter_stops, filter_punct=filter_punct,
filter_nums=filter_nums
)
)
if weighting == "count":
word_counts_ = dict(word_counts_)
elif weighting == "freq":
n_tokens = self.n_tokens
word_counts_ = {
word: count / n_tokens for word, count in word_counts_.items()
}
else:
raise ValueError(
"weighting='{}' is invalid; valid values are {}".format(
weighting, {"count", "freq"}
)
)
return word_counts_
def word_doc_counts(
self, normalize="lemma", weighting="count", smooth_idf=True, as_strings=False,
filter_stops=True, filter_punct=True, filter_nums=True):
"""
Map the set of unique words in :class:`Corpus` to their *document* counts
        as absolute, relative, inverse, or binary frequencies of occurrence.
Args:
normalize (str): If "lemma", lemmatize words before counting; if
"lower", lowercase words before counting; otherwise, words are
counted using the form with which they appear.
weighting ({"count", "freq", "idf"}): Type of weight to assign to words.
If "count" (default), weights are the absolute number (count)
of documents in which word appears. If "freq", word doc counts
are normalized by the total document count, giving their relative
frequencies of occurrence. If "idf", weights are the log of the
inverse relative frequencies: ``log(n_docs / word_doc_count)``
or (if ``smooth_idf`` is True) ``log(1 + (n_docs / word_doc_count))`` .
smooth_idf (bool): If True, add 1 to all word doc counts when
calculating "idf" weighting, equivalent to adding a single
document to the corpus containing every unique word.
as_strings (bool): If True, words are returned as strings; if False
(default), words are returned as their unique integer ids
filter_stops (bool): If True (default), stop word counts are removed.
filter_punct (bool): If True (default), punctuation counts are removed.
filter_nums (bool): If True, number counts are removed.
Returns:
dict: mapping of a unique word id or string (depending on the value
of ``as_strings``) to the number of documents in which it appears
weighted as absolute, relative, or binary frequencies (depending
on the value of ``weighting``).
See Also:
* :func:`textacy.vsm.get_doc_freqs() <textacy.vsm.matrix_utils.get_doc_freqs>`
"""
word_doc_counts_ = collections.Counter()
for doc in self:
word_doc_counts_.update(
doc._.to_bag_of_words(
normalize=normalize, weighting="binary", as_strings=as_strings,
filter_stops=filter_stops, filter_punct=filter_punct,
filter_nums=filter_nums
)
)
if weighting == "count":
word_doc_counts_ = dict(word_doc_counts_)
elif weighting == "freq":
n_docs = self.n_docs
word_doc_counts_ = {
word: count / n_docs for word, count in word_doc_counts_.items()
}
elif weighting == "idf":
n_docs = self.n_docs
if smooth_idf is True:
word_doc_counts_ = {
word: math.log(1 + (n_docs / count))
for word, count in word_doc_counts_.items()
}
else:
word_doc_counts_ = {
word: math.log(n_docs / count)
for word, count in word_doc_counts_.items()
}
else:
raise ValueError(
"weighting='{}' is invalid; valid values are {}".format(
weighting, {"count", "freq", "idf"}
)
)
return word_doc_counts_
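    # Illustrative usage note (added for clarity; not part of the original API):
    # the weighting options above can be sanity-checked on a tiny corpus. Assuming
    # a corpus built from three short texts, something like the following is
    # expected to hold (exact keys depend on ``as_strings``/``normalize``):
    #
    #     >>> corpus = Corpus("en", data=["a cat", "a dog", "a cat and a dog"])
    #     >>> wdc = corpus.word_doc_counts(normalize="lower", as_strings=True)
    #     >>> wdc["cat"]                     # "cat" appears in 2 of the 3 docs
    #     2
    #     >>> corpus.word_doc_counts(normalize="lower", weighting="freq", as_strings=True)["cat"]
    #     0.6666666666666666
    #
    # and with ``weighting="idf"`` plus ``smooth_idf=True`` the value would be
    # ``math.log(1 + 3 / 2)`` instead.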
# file io
def save(self, filepath):
"""
Save :class:`Corpus` to disk as binary data.
Args:
filepath (str): Full path to file on disk where :class:`Corpus` data
will be saved as a binary file.
See Also:
:meth:`Corpus.load()`
"""
attrs = [
spacy.attrs.ORTH,
spacy.attrs.SPACY,
spacy.attrs.LEMMA,
spacy.attrs.ENT_IOB,
spacy.attrs.ENT_TYPE,
]
if self[0].is_tagged:
attrs.append(spacy.attrs.TAG)
if self[0].is_parsed:
attrs.append(spacy.attrs.HEAD)
attrs.append(spacy.attrs.DEP)
else:
attrs.append(spacy.attrs.SENT_START)
tokens = []
lengths = []
strings = set()
user_datas = []
for doc in self:
tokens.append(doc.to_array(attrs))
lengths.append(len(doc))
strings.update(tok.text for tok in doc)
user_datas.append(doc.user_data)
msg = {
"meta": self.spacy_lang.meta,
"attrs": attrs,
"tokens": np.vstack(tokens).tobytes("C"),
"lengths": np.asarray(lengths, dtype="int32").tobytes("C"),
"strings": list(strings),
"user_datas": user_datas,
}
with tio.open_sesame(filepath, mode="wb") as f:
f.write(srsly.msgpack_dumps(msg))
@classmethod
def load(cls, lang, filepath):
"""
Load previously saved :class:`Corpus` binary data, reproduce the original
        :class:`spacy.tokens.Doc` s with their tokens and annotations, and instantiate
a new :class:`Corpus` from them.
Args:
lang (str or :class:`spacy.language.Language`)
filepath (str): Full path to file on disk where :class:`Corpus` data
was previously saved as a binary file.
Returns:
:class:`Corpus`
See Also:
:meth:`Corpus.save()`
"""
spacy_lang = _get_spacy_lang(lang)
with tio.open_sesame(filepath, mode="rb") as f:
msg = srsly.msgpack_loads(f.read())
if spacy_lang.meta != msg["meta"]:
LOGGER.warning("the spacy langs are different!")
for string in msg["strings"]:
spacy_lang.vocab[string]
attrs = msg["attrs"]
lengths = np.frombuffer(msg["lengths"], dtype="int32")
flat_tokens = | np.frombuffer(msg["tokens"], dtype="uint64") | numpy.frombuffer |
from mlpractice.stats.stats_utils import print_stats, _update_stats
from mlpractice.utils import ExceptionInterception
try:
from mlpractice_solutions.mlpractice_solutions.\
linear_classifier_solution import softmax_with_cross_entropy
except ImportError:
softmax_with_cross_entropy = None
import torch
import numpy as np
def test_all(softmax_with_cross_entropy=softmax_with_cross_entropy):
test_interface(softmax_with_cross_entropy)
test_public(softmax_with_cross_entropy)
test_normalization(softmax_with_cross_entropy)
test_random(softmax_with_cross_entropy, 100)
print('All tests passed!')
_update_stats('linear_classifier', 'softmax_with_cross_entropy')
print_stats('linear_classifier')
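# Reference sketch (ours, for illustration only -- not the graded solution and not
# used by the tests): the checks below expect a numerically stable softmax, a
# cross-entropy loss summed over the batch (matching
# torch.nn.CrossEntropyLoss(reduction='sum')), and the gradient w.r.t. the predictions.
def _reference_softmax_with_cross_entropy(predictions, target_index):
    preds = np.atleast_2d(predictions).astype(float)
    targets = np.asarray(target_index).reshape(-1)
    # subtract the row-wise max for numerical stability (see test_normalization)
    shifted = preds - np.max(preds, axis=1, keepdims=True)
    exp = np.exp(shifted)
    probs = exp / np.sum(exp, axis=1, keepdims=True)
    rows = np.arange(preds.shape[0])
    loss = float(-np.sum(np.log(probs[rows, targets])))
    d_predictions = probs.copy()
    d_predictions[rows, targets] -= 1.0
    # the gradient is returned with the same shape as the input predictions
    return loss, d_predictions.reshape(np.shape(predictions))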
def test_interface(softmax_with_cross_entropy=softmax_with_cross_entropy):
with ExceptionInterception():
predictions1 = np.array([1, 2, 3])
target_index1 = np.array([2])
predictions2 = np.array([[1, 2, 3],
[1, 2, 3]])
target_index2 = np.array([2, 2])
loss1, d_predictions1 = softmax_with_cross_entropy(
predictions1, target_index1,
)
loss2, d_predictions2 = softmax_with_cross_entropy(
predictions2, target_index2,
)
assert isinstance(loss1, float), \
"softmax_with_cross_entropy must return a float and an ndarray"
assert isinstance(loss2, float), \
"softmax_with_cross_entropy must return a float and an ndarray"
assert isinstance(d_predictions1, np.ndarray), \
"softmax_with_cross_entropy must return a float and an ndarray"
assert isinstance(d_predictions2, np.ndarray), \
"softmax_with_cross_entropy must return a float and an ndarray"
assert d_predictions1.shape == predictions1.shape, \
"The output d_predictions shape must match the predictions shape"
assert d_predictions2.shape == predictions2.shape, \
"The output d_predictions shape must match the predictions shape"
def test_public(softmax_with_cross_entropy=softmax_with_cross_entropy):
with ExceptionInterception():
predictions = np.array([1, 2, 3])
target_index = np.array([2])
loss, d_predictions = softmax_with_cross_entropy(
predictions,
target_index,
)
predictions_tensor = torch.from_numpy(
predictions[np.newaxis, :],
).float()
predictions_tensor.requires_grad = True
sample_loss = torch.nn.CrossEntropyLoss(reduction='sum')
sample_output = sample_loss(predictions_tensor,
torch.from_numpy(target_index).long())
sample_output.backward()
assert abs(loss - sample_output) < 10 ** -6
assert np.all(np.abs(d_predictions
- predictions_tensor.grad.numpy()) < 10 ** -6)
def test_normalization(softmax_with_cross_entropy=softmax_with_cross_entropy):
with ExceptionInterception():
predictions = np.array([0, 0, 10000])
target_index = np.array([2])
loss, d_predictions = softmax_with_cross_entropy(
predictions,
target_index,
)
predictions_tensor = torch.from_numpy(
predictions[np.newaxis, :],
).float()
predictions_tensor.requires_grad = True
sample_loss = torch.nn.CrossEntropyLoss(reduction='sum')
sample_output = sample_loss(predictions_tensor,
torch.from_numpy(target_index).long())
sample_output.backward()
assert abs(loss - sample_output) < 10 ** -6
assert np.all(np.abs(d_predictions
- predictions_tensor.grad.numpy()) < 10 ** -6)
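# Why test_normalization feeds [0, 0, 10000]: a naive softmax overflows on large
# logits, while the usual max-subtraction trick does not. Quick illustration
# (ours; not used by the tests):
def _naive_vs_stable_softmax(x=np.array([0.0, 0.0, 10000.0])):
    naive = np.exp(x) / np.sum(np.exp(x))      # overflows: inf/inf -> nan entries
    shifted = np.exp(x - np.max(x))
    stable = shifted / np.sum(shifted)         # -> [~0.0, ~0.0, 1.0]
    return naive, stable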
def test_random(
softmax_with_cross_entropy=softmax_with_cross_entropy,
iterations=1,
):
with ExceptionInterception():
np.random.seed(42)
for _ in range(iterations):
predictions = np.random.rand(3, 4)
target_index = | np.random.randint(0, 4, size=3) | numpy.random.randint |
import argparse
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
from tqdm import tqdm
from timeit import default_timer as timer
import utils.tfrecord_utils as tfrecord_utils
from tensorflow.python.client import timeline
def log_string(out_str):
#LOG_FOUT.write(out_str + '\n')
#LOG_FOUT.flush()
print(out_str)
def get_learning_rate(batch, batch_size,
base_learning_rate, learning_rate_decay_rate, learning_rate_decay_step):
learning_rate = tf.train.exponential_decay(base_learning_rate, # Base learning rate.
batch * batch_size, # Current index into the dataset.
learning_rate_decay_step, # Decay step.
learning_rate_decay_rate, # Decay rate.
staircase=True)
learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
return learning_rate
def get_bn_decay(batch, batch_size,
bn_init_decay, bn_decay_decay_rate,
bn_decay_decay_step, bn_decay_clip):
bn_momentum = tf.train.exponential_decay(bn_init_decay, batch * batch_size, bn_decay_decay_step,
bn_decay_decay_rate, staircase=True)
bn_decay = tf.minimum(bn_decay_clip, 1 - bn_momentum)
return bn_decay
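# Illustrative sketch (ours; never called): both schedules above are staircase
# exponential decays of the form base * rate**floor(batch*batch_size/decay_step).
# This mirrors their math in plain numpy, using the default hyperparameters of
# train() below, so the curve shapes can be previewed without building a TF graph.
def _preview_decay_schedules(steps=np.arange(0, 400001, 50000), batch_size=16):
    lr = 0.001 * np.power(0.8, (steps * batch_size) // 200000)           # staircase lr decay
    lr = np.maximum(lr, 0.00001)                                         # same clip as above
    bn_momentum = 0.5 * np.power(0.5, (steps * batch_size) // 200000)    # bn momentum decay
    bn_decay = np.minimum(0.99, 1 - bn_momentum)                         # same clip as above
    return lr, bn_decay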
def train(tfrecords_path,
model_name,
batch_size=16, base_learning_rate=0.001, learning_rate_decay_rate=0.8,
gpu_index=0, optimizer_name='adam', log_dir='log', epochs=100,
bn_init_decay=0.5, bn_decay_decay_rate=0.5, bn_decay_decay_step=200000, bn_decay_clip=0.99,
pretrained_weights_file_path=None):
"""
Args:
tfrecords_path (str): Top level input path where are train/test dirs with tfrecords.
        batch_size (int): Batch size during training [default: 16].
base_learning_rate (float): Initial learning rate [default: 0.001].
gpu_index (int): GPU index to use [default: GPU 0].
"""
# TFRecords paths
NUM_CLASSES = 51
TFRECORDS_TRAIN_DIRPATH = os.path.join(tfrecords_path, 'train')
TFRECORDS_TEST_DIRPATH = os.path.join(tfrecords_path, 'test')
# Import model module
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'models', model_name))
MODEL = importlib.import_module(model_name)
# Log dir
if not os.path.exists(log_dir):
os.mkdir(log_dir)
os.mkdir(os.path.join(log_dir, 'timeline'))
with tf.Graph().as_default():
#######################################################################
# TFRecords
#######################################################################
# Train filenames
tfrecord_filenames_train = [f for f in os.listdir(TFRECORDS_TRAIN_DIRPATH) if '.tfrecord' in f]
tfrecord_filenames_train.sort()
tfrecord_filepaths_train = [os.path.join(TFRECORDS_TRAIN_DIRPATH, f) for f in tfrecord_filenames_train]
# Train filenames
tfrecord_filenames_test = [f for f in os.listdir(TFRECORDS_TEST_DIRPATH) if '.tfrecord' in f]
tfrecord_filenames_test.sort()
tfrecord_filepaths_test = [os.path.join(TFRECORDS_TEST_DIRPATH, f) for f in tfrecord_filenames_test]
# TFdataset
tfrecord_filepaths_placeholder = tf.placeholder(tf.string, [None])
tfdataset = tf.data.TFRecordDataset(tfrecord_filepaths_placeholder)
tfdataset = tfdataset.shuffle(buffer_size=batch_size*10) # Only one tfrecord file -> no action
tfdataset = tfdataset.map(tfrecord_utils.tfexample_to_depth_image, num_parallel_calls=4)
#######################################################################
# Unorganized point cloud
#######################################################################
# # Load
# tfdataset = tfdataset.map(lambda a, b: tf.py_func(tfrecord_utils.load_depth_and_create_point_cloud_data_rnd,
# [a['pcd_path'], a['img_path'], a['loc_path'], b['name'],
# b['int'], NUM_POINT],
# [tf.float32, tf.string, tf.string, tf.string, tf.int64]))
# # Augment
# tfdataset = tfdataset.map(lambda a, b, c, d, e:
# tf.py_func(tfrecord_utils.augment_point_cloud, [a, b, c, d, e, True, True, False],
# [tf.float32, tf.string, tf.string, tf.string, tf.int64]))
#######################################################################
# Organized point cloud
#######################################################################
# Settings
data_channels = 3
data_height = 224
data_width = 224
zero_mean = True
unit_ball = True
# # Load data
# tfdataset = tfdataset.map(lambda a, b: tf.py_func(tfrecord_utils.load_depth_and_create_organized_point_cloud,
# [a['pcd_path'], a['img_path'], a['loc_path'], b['name'],
# b['int'], data_height, zero_mean, unit_ball],
# [tf.float32, tf.string, tf.string, tf.string, tf.int64]),
# num_parallel_calls=4)
# Load data
tfdataset = tfdataset.map(lambda a, b: tf.py_func(tfrecord_utils.create_organized_point_cloud,
[a['depth-image'], a['depth-image-loc'], b['name'], b['int'],
data_height, zero_mean, unit_ball], [tf.float32, tf.int64,
tf.string, tf.int64]),
num_parallel_calls=4)
#######################################################################
# Depth image
#######################################################################
# # Load data
# data_channels = 1
# data_height = 299
# data_width = 299
# data_scale = 1.0 # max depth from kinect is 10m, so 0.1 gives us range of 0-1
# # data_mean = 775.6092 # None if zero, sample specific if below zero, given value otherwise
# # data_std = 499.1676 # None if zero, sample specific if below zero, given value otherwise
# data_mean = 775.6092 - 499.1676 # To be in the range of 0-1
# data_std = 499.1676 * 2 # To be in the range of 0-1
# tfdataset = tfdataset.map(lambda a, b: tf.py_func(tfrecord_utils.load_depth,
# [a['pcd_path'], a['img_path'], a['loc_path'], b['name'],
# b['int'], data_height, data_scale, data_mean, data_std],
# [tf.float32, tf.string, tf.string, tf.string, tf.int64]),
# num_parallel_calls=4)
# # Tile
# if data_channels == 3:
# tfdataset = tfdataset.map(lambda a, b, c, d, e:
# tf.py_func(tfrecord_utils.tile_depth_image, [a, b, c, d, e],
# [tf.float32, tf.string, tf.string, tf.string, tf.int64]))
# # Augment
# tfdataset = tfdataset.map(lambda a, b, c, d, e:
# tf.py_func(tfrecord_utils.augment_depth_image, [a, b, c, d, e],
# [tf.float32, tf.string, tf.string, tf.string, tf.int64]))
# Transformations
tfdataset = tfdataset.shuffle(buffer_size=batch_size * 2)
tfdataset = tfdataset.batch(batch_size=batch_size, drop_remainder=True)
tfdataset = tfdataset.prefetch(10)
# Iterator
data_iterator = tfdataset.make_initializable_iterator()
data_pcd, _, _, data_y_int = data_iterator.get_next()
data_pcd = tf.reshape(data_pcd, (batch_size, data_height, data_width, data_channels))
#######################################################################
# Network architecture
#######################################################################
with tf.device('/gpu:' + str(gpu_index)):
is_training_pl = tf.Variable(True, trainable=False, dtype=tf.bool)
# Note the global_step=batch parameter to minimize.
# That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
batch = tf.Variable(0, trainable=False)
bn_decay = get_bn_decay(batch, batch_size=batch_size,
bn_init_decay=bn_init_decay, bn_decay_decay_rate=bn_decay_decay_rate,
bn_decay_decay_step=bn_decay_decay_step, bn_decay_clip=bn_decay_clip)
tf.summary.scalar('bn_decay', bn_decay)
# Get model and loss
pred, end_points = MODEL.get_model(data_pcd, is_training_pl, num_classes=NUM_CLASSES,
bn_decay=bn_decay, with_bn=False)
loss = MODEL.get_loss(pred, data_y_int, end_points, num_classes=NUM_CLASSES)
# # Number of trainable weights
# trainable_weights_no = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
# print('trainable_weights_no: {}'.format(trainable_weights_no))
# exit(0)
tf.summary.scalar('loss', loss)
correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(data_y_int))
accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(batch_size)
tf.summary.scalar('accuracy', accuracy)
# Get learning rate
learning_rate = get_learning_rate(batch, batch_size=batch_size,
base_learning_rate=base_learning_rate,
learning_rate_decay_rate=learning_rate_decay_rate,
learning_rate_decay_step=bn_decay_decay_step)
tf.summary.scalar('learning_rate', learning_rate)
# OPTIMIZATION - Also updates batchnorm operations automatically
with tf.variable_scope('opt') as scope:
if optimizer_name == 'momentum':
                    optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)  # 0.9 is a standard default; the MOMENTUM constant was never defined in this script
elif optimizer_name == 'adam':
optimizer = tf.train.AdamOptimizer(learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) # for batchnorm
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step=batch)
            # Load weights from checkpoint
if model_name == 'inception_v3' and pretrained_weights_file_path is not None:
# Lists of scopes of weights to include/exclude from pretrained snapshot
pretrained_include = ["InceptionV3"]
pretrained_exclude = ["InceptionV3/AuxLogits", "InceptionV3/Logits"]
# PRETRAINED SAVER - For loading pretrained weights on the first run
pretrained_vars = tf.contrib.framework.get_variables_to_restore(include=pretrained_include,
exclude=pretrained_exclude)
tf_pretrained_saver = tf.train.Saver(pretrained_vars, name="pretrained_saver")
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
#######################################################################
# Create session
#######################################################################
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Add summary writers
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(os.path.join(log_dir, 'train'), sess.graph)
test_writer = tf.summary.FileWriter(os.path.join(log_dir, 'test'))
# Init variables
sess.run(tf.global_variables_initializer())
# Profiling
sess_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
sess_metadata = tf.RunMetadata()
# print('VARS NO: {}'.format(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])))
# exit()
# Restore?
# saver.restore(sess, tf.train.latest_checkpoint('log'))
if model_name == 'inception_v3' and pretrained_weights_file_path is not None:
sess.run(tf.global_variables_initializer())
tf_pretrained_saver.restore(sess, pretrained_weights_file_path)
ops = {'is_training_pl': is_training_pl, 'pred': pred,
'loss': loss, 'train_op': train_op, 'merged': merged, 'step': batch,
# 'pointclouds_pl': pointclouds_pl, 'labels_pl': labels_pl,
'data_y_int': data_y_int, 'data_pcd': data_pcd,
'batch_size': batch_size, 'num_classes': NUM_CLASSES,
'sess_options': sess_options, 'sess_metadata': sess_metadata,
'log_dir': log_dir,
}
for epoch in range(epochs):
log_string('**** EPOCH %03d ****' % epoch)
sys.stdout.flush()
train_one_epoch(sess, ops, train_writer, data_iterator, tfrecord_filepaths_train,
tfrecord_filepaths_placeholder)
eval_one_epoch(sess, ops, test_writer, data_iterator, tfrecord_filepaths_test,
tfrecord_filepaths_placeholder)
# Save the variables to disk.
save_path = saver.save(sess, os.path.join(log_dir, "model.ckpt"))
log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer, data_iterator, tfrecord_filepaths_train,
tfrecord_filepaths_placeholder):
""" ops: dict mapping from string to tf ops """
# Iterate over the all datapoints
total_correct = 0.
total_seen = 0.
loss_sum = 0.
# Reset train data
sess.run(data_iterator.initializer, feed_dict={tfrecord_filepaths_placeholder: tfrecord_filepaths_train})
# Set trainable weights
sess.run(ops['is_training_pl'].assign(True))
pbar = tqdm(desc='', unit='tick')
try:
while True:
# Train it
batch_train_start = timer()
summary, step, _, loss_val, pred_val, current_label = sess.run(
[ops['merged'], ops['step'], ops['train_op'], ops['loss'], ops['pred'], ops['data_y_int']],
options=ops['sess_options'], run_metadata=ops['sess_metadata'])
batch_train_end = timer()
# Profiling
fetched_timeline = timeline.Timeline(ops['sess_metadata'].step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
with open(os.path.join(ops['log_dir'], 'timeline', 'timeline_02_step_%d.json' % step), 'w') as f:
f.write(chrome_trace)
            # Print predicted value and label
# print('pred_val: {} curr_lab: {}'.format(pred_val[0], current_label[0]))
            # Some accuracy calculation
train_writer.add_summary(summary, step)
pred_val = | np.argmax(pred_val, 1) | numpy.argmax |
"""
Compute global or local influence.
"""
import os
import sys
import time
import joblib
import argparse
import resource
from datetime import datetime
import numpy as np
from sklearn.base import clone
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here + '/../../')
import intent
import util
from influence import select_elements
def get_pred(objective, model, X, y):
"""
Return predictions of shape=(X.shape[0], no. class).
"""
if objective == 'regression':
pred = model.predict(X)
elif objective == 'binary':
pred = model.predict_proba(X)[:, 1]
elif objective == 'multiclass':
pred = model.predict_proba(X)
else:
raise ValueError(f'objective {objective} unknown!')
return pred
def get_ranking(inf_obj, method, params, agg, tree, X_train, y_train, X, y, logger=None):
"""
Return influence values.
"""
# fit explainer
start = time.time()
explainer = intent.TreeExplainer(method, params, logger).fit(tree, X_train, y_train)
fit_time = time.time() - start - explainer.parse_time_
if logger:
logger.info(f'\n[INFO] explainer fit time: {fit_time:.5f}s')
    # compute influence values
start2 = time.time()
if inf_obj == 'local':
influence = explainer.get_local_influence(X, y)
if agg == 'sum':
influence = np.sum(influence, axis=1) # shape=(no. train,)
elif agg == 'abs_sum':
influence = np.sum(np.abs(influence), axis=1) # shape=(no. train,)
elif agg == 'mean':
influence = np.mean(influence, axis=1) # shape=(no. train,)
else:
assert agg == 'abs_mean'
influence = np.mean(np.abs(influence), axis=1) # shape=(no. train,)
else:
assert inf_obj == 'global'
influence = explainer.get_global_influence()
inf_time = time.time() - start2
if logger:
logger.info(f'[INFO] explainer influence time: {inf_time:.5f}s')
ranking = np.argsort(np.abs(influence))
return ranking
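# Small illustrative sketch (ours; not called anywhere): how the four `agg` options
# above collapse a (no. train, no. test) local-influence matrix into one score per
# training example. The toy matrix is made up; only the aggregation mirrors the
# branches in get_ranking().
def _demo_influence_aggregation():
    influence = np.array([[0.2, -0.5],
                          [0.1, 0.1]])  # rows: train examples, columns: test examples
    return {
        'sum': np.sum(influence, axis=1),                # [-0.3, 0.2]
        'abs_sum': np.sum(np.abs(influence), axis=1),    # [0.7, 0.2]
        'mean': np.mean(influence, axis=1),              # [-0.15, 0.1]
        'abs_mean': np.mean(np.abs(influence), axis=1),  # [0.35, 0.1]
    }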
def remove_and_evaluate(args, objective, params, tree, X_train, y_train, X_test, y_test, test_idxs, logger):
# initial predictions
pred = get_pred(objective, tree, X_test, y_test)
# get list of remove fractions
remove_frac_arr = np.linspace(0, args.remove_frac, args.n_ckpt + 1)
n_remove = int(args.remove_frac * X_train.shape[0] / args.n_ckpt)
# # result container
# result = {}
# result['remove_frac'] = remove_frac_arr
# result['loss'] = np.full(remove_frac_arr.shape[0], np.nan, dtype=np.float32)
# result['pred'] = []
new_X_train = X_train.copy()
new_y_train = y_train.copy()
new_tree = clone(tree).fit(new_X_train, new_y_train)
for i in range(1, args.n_ckpt):
ranking = get_ranking(args.inf_obj, args.method, params, args.agg, new_tree,
new_X_train, new_y_train, X_test[test_idxs], y_test[test_idxs], logger=None)
new_X_train = np.delete(new_X_train, ranking[:n_remove], axis=0)
new_y_train = np.delete(new_y_train, ranking[:n_remove])
if objective == 'binary' and len(np.unique(new_y_train)) == 1:
logger.info('Only samples from one class remain!')
break
elif objective == 'multiclass' and len( | np.unique(new_y_train) | numpy.unique |
# -*- coding: utf-8 -*-
"""
File Name: pipeline
    Description : processing pipeline
Author : mick.yi
date: 2019/1/3
"""
import tensorflow as tf
import numpy as np
import h5py
import keras
from keras.layers import Input
from keras.models import Model
from keras.optimizers import SGD
import keras.backend as K
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TensorBoard
import sys
import os
import time
import argparse
if __package__ is None:
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
__package__ = "keras-lbl-IvS"
from .config import current_config as config
from .utils import random_select, get_weights, update_weights, update_queue
from .faiss_utils import get_index, update_multi
from .reader import get_mslm_infos, load_img
from .layers import DenseWithDPSoftmaxLoss
def set_gpu_growth():
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
cfg = tf.ConfigProto()
cfg.gpu_options.allow_growth = True
session = tf.Session(config=cfg)
K.set_session(session)
def generator(images_info, label_id_dict, dominant_queue, num_class, batch_size):
"""
    Training sample generator
    :param images_info: image metadata
    :param label_id_dict: dict mapping each class label to its image ids
    :param dominant_queue: dominant queue; maps each class to its dominant classes
    :param num_class: number of classes
    :param batch_size: batch_size
:return:
"""
while True:
        # two-stage sampling: first sample batch_size/2 positive classes, then randomly sample dominant classes
        sample_labels = np.random.choice(num_class, batch_size // 2, replace=False)  # sampling without replacement
        selected_labels = set(sample_labels)  # set of class labels already selected in this step
        selected_image_labels = []  # images and corresponding class labels selected in this step
        # first, pick the positive prototypes
for label in sample_labels:
selected_image_id = random_select(label_id_dict[label])
selected_image_labels.append([selected_image_id, label])
        # then pick related dominant prototypes until the mini-batch is full
while len(selected_image_labels) < batch_size:
            # randomly pick one of the current positive prototypes
            label = random_select(sample_labels)
            # randomly pick a dominant class that has not been selected before
dq_label = random_select(dominant_queue[label])
while dq_label in selected_labels:
dq_label = random_select(dominant_queue[label])
selected_labels.add(dq_label)
            # pick an image of the dominant class
selected_image_id = random_select(label_id_dict[dq_label])
selected_image_labels.append([selected_image_id, dq_label])
        # labels selected for the current mini-batch
        selected_image_labels = np.asarray(selected_image_labels)  # convert to a numpy array
current_selected_labels = selected_image_labels[:, 1]
current_weights = get_weights(config.pw_h5_file[config.prototype_weights_dataset],
current_selected_labels)
        # load the images
images = [load_img(images_info[image_id]['img_path']) for image_id, label in selected_image_labels]
images = np.asarray(images)
        # yield the current mini-batch
current_selected_labels = np.expand_dims(current_selected_labels, axis=1)
# print("current_selected_labels.shape:{}".format(current_selected_labels.shape))
# print("images.shape:{},type(images):{}".format(images.shape, type(images)))
yield [images,
current_weights,
               current_selected_labels], np.arange(batch_size)  # the target labels are always 0~batch_size-1
def init_queue(index, weights_set, num_class, dq_num, cq_num):
"""
    Initialize the candidate queue and the dominant queue
    :param index:
    :param weights_set: h5py dataset object
:param num_class:
:param dq_num:
:param cq_num:
:return:
"""
data, candidate_label_idx = index.search(weights_set[:num_class], cq_num)
    dominant_label_idx = candidate_label_idx[:, :dq_num]  # the candidate queue contains the dominant queue
    # convert to dicts
dominant_queue = dict(enumerate(dominant_label_idx))
candidate_queue = dict(enumerate(candidate_label_idx))
return dominant_queue, candidate_queue
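# Illustrative stand-in (ours; the real pipeline uses a faiss index built by
# get_index()): init_queue() only needs an object whose search(x, k) returns
# (similarities, neighbor_ids), so a brute-force NumPy version makes the
# dominant/candidate queues easy to inspect in isolation.
class _BruteForceIndex(object):
    def __init__(self, weights):
        self.weights = weights  # (num_class, dim) prototype weights
    def search(self, queries, k):
        sims = np.dot(queries, self.weights.T)   # inner-product similarity
        ids = np.argsort(-sims, axis=1)[:, :k]   # k most similar classes, best first
        return np.take_along_axis(sims, ids, axis=1), ids
# e.g. init_queue(_BruteForceIndex(w), w, num_class=w.shape[0], dq_num=2, cq_num=5)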
def init_prototype(images_info, label_id_dict, num_class):
inputs = Input(batch_shape=(config.batch_size,) + config.input_shape)
features = config.backbone(inputs)
model = Model(inputs, features)
model.load_weights(config.backbone_weights, by_name=True)
    # keep one copy of the prototype weights in hdf5 and one in faiss (the faiss copy is not exact)
f_label = h5py.File(config.prototype_weights_hdf5, 'w')
label_feature = f_label.create_dataset(config.prototype_weights_dataset,
shape=(num_class, 512), dtype='f')
    # process the classes one by one
    for label in range(num_class):
        # get all images of this label, predict their features with the model, and use the mean feature as the label's prototype weight
        image_ids = label_id_dict[label]  # image ids
        images = [load_img(images_info[image_id]['img_path']) for image_id in image_ids]  # image data
        features = model.predict(np.asarray(images))  # predicted features
        features = keras.utils.np_utils.normalize(features)  # normalize
        features = np.mean(features, axis=0)  # mean over the images
        features = keras.utils.np_utils.normalize(features)  # normalize again; still 2-d
        # store into the hdf5 dataset
        label_feature[label] = features[0]  # (1,d) to (d,)
        # flush to disk periodically
if label % 500 == 0:
f_label.flush()
print("{} init_prototype 完成:{}".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
label))
    # finally, close the file
f_label.close()
def get_prototype(deal_batch=1000):
f_label = h5py.File(config.prototype_weights_hdf5, 'r+')
dset = f_label[config.prototype_weights_dataset]
length = len(dset)
index = get_index(512)
    # process the classes batch by batch and update the faiss index
for batch_no in range(length // deal_batch):
start = batch_no * deal_batch
end = (batch_no + 1) * deal_batch
features = dset[start:end]
update_multi(index, features, np.arange(start, end))
    # handle the remainder when length is not divisible by deal_batch
if not length % deal_batch == 0:
start = length - length % deal_batch
end = length
features = dset[start:end]
update_multi(index, features, np.arange(start, end))
return f_label, index
def label_id_map(images_info, num_class):
"""
    Group images by class label
    :param images_info: image dicts {'img_path': image path, 'label': class label, 'img_id': image id}
    :param num_class: number of classes
:return:
"""
    # initialize
label_id_dict = dict()
for i in range(num_class):
label_id_dict[i] = []
    # assign each image to its class
for i in range(len(images_info)):
label = int(images_info[i]['label'])
img_id = images_info[i]['img_id']
label_id_dict[label].append(img_id)
return label_id_dict
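# Tiny usage sketch (ours; not used by the pipeline): label_id_map() simply groups
# image ids by class label. The toy metadata below is made up for illustration.
def _example_label_id_map():
    images_info = [{'img_path': 'a.jpg', 'label': 0, 'img_id': 0},
                   {'img_path': 'b.jpg', 'label': 1, 'img_id': 1},
                   {'img_path': 'c.jpg', 'label': 0, 'img_id': 2}]
    return label_id_map(images_info, num_class=2)  # -> {0: [0, 2], 1: [1]}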
class ExportWeights(keras.callbacks.Callback):
def on_batch_end(self, batch, logs=None):
layer = self.model.layers[-1]
trained_weights, current_trained_labels, y_pred = layer.get_weights()[:3]
trained_weights = | np.transpose(trained_weights) | numpy.transpose |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 3 07:52:07 2020
@author: <NAME>
This code is written based on WEC-sim.
<EMAIL>
Note: readData method is included to import data type of both .txt and .mat
Note: irregularWaveSpectrum method has been modified for the faster computation.
The original method from WEC-sim is commented.
Note: MATLAB column vectors are changed to array for faster computation
Note: waveElevationGrid method and write_paraview_vtp_wave method is moved to
paraviewClass.py
Note: Values are equal to tolerance rtol=1e-07, atol=0
Values will not be exact to WEC-sim due to difference in significant figures
or the way some methods are used.
(e.g)
integrate.cumtrapz(S_f,freq) and MATLAB cumtrapz(freq,S_f) do not produce
the same results, but similar values to rtol=1e-06
Note: "RuntimeWarning: overflow encountered in sinh"
When using irregular wave or spectrumImport, Equal Energy with wDepth value
over 100 can generate "RuntimeWarning: overflow encountered in sinh" as the
value of sinh in waveSetup method reaches infinity.
"""
from scipy import integrate
import matplotlib.pyplot as plt
import numpy as np
import numpy.matlib
import warnings
import scipy.io as sio
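# Small check (ours; not used by the class): the docstring note above compares
# scipy's integrate.cumtrapz(y, x) with MATLAB's cumtrapz(x, y) -- the same
# quantity with swapped argument order -- and only small floating-point
# differences are expected on a smooth spectrum.
def _cumtrapz_order_demo():
    freq = np.linspace(0.1, 2.0, 50)
    S_f = np.exp(-freq)                     # stand-in spectrum, illustration only
    return integrate.cumtrapz(S_f, freq)    # y first, x second (opposite of MATLAB)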
class WaveClass:
def inputProperties(self):
"""
Set up necessary properties of the wave condition based on input file
Returns
-------
wType, T, H, spectrumType, gamma, phaseSeed, spectrumDataFile,
etaDataFile, numFreq, waveDir, waveSpread, viz, statisticsDataLoad,
freqDisc, wavegauge1loc, wavegauge2loc, wavegauge3loc, currentSpeed,
currentDirection, currentOption, currentDepth
"""
#wType - String defining the type of waves to be generated.
#Can be one of: 'noWave', 'noWaveCIC', 'regular', 'regularCIC','irregular',
#'spectrumImport', and 'etaImport'
#(Default = 'NOT DEFINED').
self.wType = 'NOT DEFINED'
#T - [s] Wave period, peak wave period or BEM period.
#Wave period (regular waves), peak period (irregular waves), or period of
#BEM data used for hydrodynamic coefficients ('noWave')
#(Default = 'NOT DEFINED').
self.T = 'NOT DEFINED'
        #H - [m] Wave height or significant wave height for irregular waves
#Wave height (regular waves) or significant wave height (irregular waves)
#(Default = 'NOT DEFINED').
self.H = 'NOT DEFINED'
#spectrumType - String containing the wave spectrum type
#Can be one of : 'PM', 'BS', and 'JS'
#(Default = 'NOT DEFINED').
self.spectrumType = 'NOT DEFINED'
#gamma - Only used for 'JS' spectrum type to define gamma
#(Default = 3.3)
self.gamma = 3.3
#phaseSeed - Only used for irregular waves
#if equal to 1,2,3,...,etc, the waves phase is seeded.
#(Default = 0)
self.phaseSeed = 0
#spectrumDataFile - Data file that contains the spectrum data file
#(Default = 'NOT DEFINED')
self.spectrumDataFile = 'NOT DEFINED'
#etaDataFile - Data file that contains the times-series data file
#(Default = 'NOT DEFINED')
self.etaDataFile = 'NOT DEFINED'
#freqRange - Min and max frequency for irregular waves.
#array with two values, rad/s, (default = frequency range in BEM data)
#(Default = [])
#eg. [0.3,0.5]
self.freqRange = []
        #numFreq - Number of interpolated wave frequencies
#Number of frequencies used, varies depending on method:
#Traditional = 1000, EqualEnergy = 500 or 'Imported'
#(Default = 0)
self.numFreq = 0
#waveDir - [deg] Incident wave direction(s)
#Should be defined as a column vector for more than one wave direction
#For multiple wave direction it will be an array of angle in degree
#[20,45,60,80]
#(Default = 0)
self.waveDir = [0]
#waveSpread - Wave Spread probability associated with wave direction(s)
#Should be defined as a column vector for more than one wave direction
#For multiple wave direction it will be an array that corresponds to
#wave direction [20,45,60,80]
#(Default = [1])
self.waveSpread = [1]
#viz - Dictionary defining visualization options
#Should be a dictionary containing the fields 'numPointsX' and 'numPointsY'.
#numPointsX is the number of visualization points in x direction, and
#numPointsY the number of visualization points in y direction
self.viz = {'numPointsX': 50,'numPointsY': 50 }
#statisticsDataLoad - File name from which to load wave statistics data
#(Default = [])
self.statisticsDataLoad = []
#freqDisc - Method of frequency discretization for irregular waves.
#Options for this variable are 'EqualEnergy' or 'Traditional'.
#(Default = 'EqualEnergy').
self.freqDisc = 'EqualEnergy'
#wavegauge1loc - [m] Wave gauge 1 [x,y] location
#(Default = [0,0]).
self.wavegauge1loc = [0,0]
#wavegauge2loc - [m] Wave gauge 2 [x,y] location
#(Default = [0,0]).
self.wavegauge2loc = [0,0]
#wavegauge3loc - [m] Wave gauge 3 [x,y] location
#(Default = [0,0]).
self.wavegauge3loc = [0,0]
#currentSpeed - [m/s] Surface current speed that is uniform along the
#water column.
#(Default = 0).
self.currentSpeed = 0
#currentDirection - [deg] Surface current direction.
#(Default = 0).
self.currentDirection = 0
#currentOption - [-] Define the sub-surface current model to be used in
#WEC-Sim.
#(Default = 0)
#0 : Depth-independent model
#1 : 1/7 power law variation with depth
#2 : linear variation with depth
#3 : no current
self.currentOption = 3
#currentDepth - [m] Define the depth over which the sub-surface current is
#modeled.
#For options (1) and (2) the currentDepth must be defined. The
#current is not calculated for any depths greater than the
#specified currentDepth. (Default = 0).
self.currentDepth = 0
def _internalProperties(self):
"""
The following properties are for internal use
Returns
-------
typeNum, bemFreq, waterDepth, deepWaterWave, waveAmpTime,waveAmpTime1,
waveAmpTime2, waveAmpTime3, A, w, phase, dw, k, S, Pw
"""
#typeNum - Number to represent different type of waves
self.typeNum = []
#bemFreq - Number of wave frequencies from BEM
self.bemFreq = []
#waterDepth - [m] Water depth (from BEM)
self.waterDepth = []
#deepWaterWave - Deep water or not, depending on input from WAMIT,
#NEMOH and AQWA
self.deepWaterWave = []
#waveAmpTime - [m] Wave elevation time history
self.waveAmpTime = []
#waveAmpTime1 - [m] Wave elevation time history at a wave gauge 1 location
#specified by user
self.waveAmpTime1 = []
#waveAmpTime2 - [m] Wave elevation time history at a wave gauge 2 location
#specified by user
self.waveAmpTime2 = []
#waveAmpTime3 - [m] Wave elevation time history at a wave gauge 3 location
#specified by user
self.waveAmpTime3 = []
#A - [m] Wave amplitude for regular waves or 2*(wave spectrum vector) for
#irregular waves
self.A = []
#w - [rad/s] Wave frequency (regular waves) or
#wave frequency vector (irregular waves)
self.w = []
#phase - [rad] Wave phase (only used for irregular waves)
self.phase = 0
#dw - [rad] Frequency spacing for irregular waves.
self.dw = 0
#k - Wave Number
self.k = []
#S - Wave Spectrum [m^2-s/rad] for 'Traditional'
self.S = []
#Pw - Wave Power Per Unit Wave Crest [W/m]
self.Pw = []
def __init__(self,wType):
"""
waveClass constructor function
Parameters
----------
Returns
-------
self - waveClass object
"""
self.wType = wType
self.gamma = 3.3
self.w = []
if self.wType == 'noWave': #No Waves with Constant Hydrodynamic Coefficients
self.typeNum = 0
elif self.wType == 'noWaveCIC': #No Waves w/Convolution Integral Calculation
self.typeNum = 1
elif self.wType == 'regular': #Regular Waves with Constant Hydrodynamic Coefficients
self.typeNum = 10
elif self.wType == 'regularCIC': #Regular Waves w/Convolution Integral Calculation
self.typeNum = 11
elif self.wType == 'irregular': #Irregular Waves with 'PM', BS' or 'JS' wave spectrum
self.typeNum = 20
elif self.wType == 'spectrumImport': #Irregular waves with imported wave spectrum
self.typeNum = 21
elif self.wType == 'etaImport': #Waves with imported wave elevation time-history
self.typeNum = 30
def plotEta(self, *rampTime):
"""
Plot wave elevation time-history
This method is for our visual understanding.
Does not get used in any method
rampTime input argument is optional
"""
plt.figure(figsize=(10,8))
plt.plot(self.waveAmpTime[0],self.waveAmpTime[1])
        plt.title('Wave Surface Elevation')
        if np.size(rampTime) == 1:  # If rampTime is given, mark it with a vertical line; the plot is otherwise the same
            plt.plot([rampTime[0], rampTime[0]],
                     [1.5*min(self.waveAmpTime[1]), 1.5*max(self.waveAmpTime[1])])
            plt.title('Wave Surface Elevation, Ramp Time ' + str(rampTime[0]) + ' (s)')
plt.xlabel('Time (s)')
plt.ylabel('Eta (m)')
def plotSpectrum(self):
"""
        Plot wave spectrum
This method is for our visual understanding.
Does not get used in any method
"""
m0 = np.trapz(self.S, x = self.w)
HsTest = 4*np.sqrt(m0)
I = np.argmax(np.abs(self.S))
wp = self.w[I]
TpTest = 2*np.pi/wp
plt.figure(figsize=(10,8))
plt.plot(self.w,self.S,'s-')
plt.plot(wp,np.array(max(self.S)))
plt.xlim([0, max(self.w)])
plt.title([self.spectrumType, ' Spectrum, T_p= ' + str(TpTest) + ' [s], H_m_0= ' + str(HsTest) + ', [m]'])
if self.spectrumType == 'JS':
plt.title([self.spectrumType, ' Spectrum, T_p= ' + str(TpTest) + ' [s], H_m_0= ' + str(HsTest) + ', [m], gamma = ' + str(self.gamma)])
plt.xlabel('Frequency (rad/s)')
plt.ylabel('Spectrum (m^2-s/rad)')
def waveSetup(self,bemFreq,wDepth,rampTime,dt,maxIt,g,rho,endTime):
"""
Set up wave for all wave type. Input parameters are from simulationClass.py
This method is the most important method to check out in this class
"""
        self.bemFreq = bemFreq # array of the BEM frequencies from BEMIO. Only the max and min values get used.
self.setWaveProps(wDepth) # method called to set up wave properties.
if (self.wType == 'noWave') or (self.wType == 'noWaveCIC'):
if np.size(self.w) == 0:
self.w = 2*np.pi/self.T
self.waveNumber(g) # used to set self.k
self.A = self.H/2
self.waveElevNowave(maxIt,dt) # method called to set wave elevation
elif (self.wType == 'regular') or (self.wType == 'regularCIC'):
if np.size(self.w) == 0:
self.w = 2*np.pi/self.T
self.A = self.H/2
self.waveNumber(g) # used to set self.k
self.waveElevReg(rampTime, dt, maxIt) # method called to set wave elevation
self.wavePowerReg(g,rho) # method called to find wave power
elif (self.wType == 'irregular') or (self.wType == 'spectrumImport'):
WFQSt = np.min(bemFreq)
WFQEd = np.max(bemFreq)
            if np.size(self.freqRange) != 0: # frequency range that can be set by user, e.g. [0.2, 0.5]
                if self.freqRange[0] > WFQSt and self.freqRange[0] > 0: # if the minimum of the user frequency range is greater than the minimum BEM frequency and 0
                    WFQSt = self.freqRange[0] # set new minimum frequency from the user-provided range
else:
warnings.warn("Min frequency range outside BEM data, min frequency set to min BEM frequency",DeprecationWarning)
                if self.freqRange[1] < WFQEd and self.freqRange[1] > WFQSt: # if the maximum of the user frequency range is lower than the maximum BEM frequency but greater than the minimum
                    WFQEd = self.freqRange[1] # set new maximum frequency from the user-provided range
else:
warnings.warn("Max frequency range outside BEM data, max frequency set to max BEM frequency",DeprecationWarning)
if self.freqDisc == 'Traditional': # Traditional method of computing. Refer to theory of waveclass provided by WEC-Sim to understand the theory.
if np.size(self.numFreq) == 0: # numfreq for Traditional is 1000 for default
self.numFreq = 1000
self.w = arange_MATLAB(WFQSt,WFQEd+((WFQEd-WFQSt)/(self.numFreq-1)),(WFQEd-WFQSt)/(self.numFreq-1))
self.dw = np.ones(shape=(self.numFreq,1))*(WFQEd-WFQSt)/(self.numFreq-1)
elif self.freqDisc == 'EqualEnergy': # Default way of computing irregular wave. Refer to theory of waveclass provided by WEC-Sim to understand the theory.
numFreq_interp = 500000 # number of interpolation that will set array size for SF and S_f used in irregWaveSpectrum method. Lowering this value might decrease the run time but accuracy will decrease
self.w = arange_MATLAB(WFQSt,WFQEd+((WFQEd-WFQSt)/numFreq_interp),(WFQEd-WFQSt)/numFreq_interp)
self.dw = np.mean(np.diff(self.w))
if np.size(self.numFreq) == 0: # numfreq for EqualEnergy is 500 for default
self.numFreq = 500
elif self.freqDisc == 'Imported': # set from setWaveProps method
data = self.readData(self.spectrumDataFile) # call on readData method to get files in both mat file and txt file
freq_data = data[0] # the first row out of the three rows in spectrum data file is frequency
self.w = np.array([i*2*np.pi for i in freq_data
if i>=min(bemFreq)/2/np.pi and i<=max(bemFreq)/2/np.pi])
self.numFreq = len(self.w)
self.dw = np.zeros(self.numFreq)
self.dw[0]= np.array(self.w[1]-self.w[0])
self.dw[1:self.numFreq-1]= np.array((self.w[2:]-self.w[:-2])/2)
self.dw[self.numFreq-1]= np.array(self.w[-1]-self.w[-2])
self.setWavePhase() # method called to generate wave phase. there can be multiple phase if there are more than one wave direction
self.irregWaveSpectrum(g,rho) # method called to calculate for different kinds of irregular wave calculation methods
self.waveNumber(g) # used to set self.k
self.waveElevIrreg(rampTime, dt, maxIt, self.dw) # method called to set wave elevation
elif self.wType == 'etaImport':
#Import 'etaImport' time-series here and interpolate
data = self.readData(self.etaDataFile) #Import time-series
t = arange_MATLAB(0,endTime+dt,dt) #simulation time
self.waveElevUser(rampTime, dt, maxIt, data, t) # method called to set wave elevation
t2 = np.arange(maxIt+1)*dt
initialZeros = np.zeros((maxIt+1))
            self.waveAmpTime1 = [t2,initialZeros] # set wave elevation to zeros since we just want to look at the imported data
            self.waveAmpTime2 = self.waveAmpTime1 # set wave elevation to zeros since we just want to look at the imported data
            self.waveAmpTime3 = self.waveAmpTime1 # set wave elevation to zeros since we just want to look at the imported data
def setWaveProps(self,wDepth):
"""
Sets wave properties
check for wave depth
check for wave type of noWave, noWaveCIC, and spectrumImport
"""
if wDepth == 'infinite': # Can be 'infinite' or number. From BEMIO
self.deepWaterWave = 1 # means deep water
            self.waterDepth = 200 # minimum water depth set for deep water
            print('\tInfinite water depth specified in BEM, "waves.waterDepth" set to 200m for visualization.\n')
else:
self.deepWaterWave = 0 # means shallow water
self.waterDepth = wDepth # set it to specific water depth. Can cause warning when some calculations reach infinity
if self.wType == 'noWave':
self.H = 0 # set wave height as 0
elif self.wType == 'noWaveCIC':
self.H = 0 # set wave height as 0
if np.size(self.w) == 0 and self.T == 'NOT DEFINED':
self.w = np.min(self.bemFreq)
self.T = 2*np.pi/self.w
elif np.size(self.w) == 0:
self.w = 2*np.pi/self.T
else:
self.T = 2*np.pi/self.w
elif self.wType == 'spectrumImport':
self.H = 0
self.T = 0
self.freqDisc = 'Imported' # one of the type of freqDisc that used later in waveSetup method
self.spectrumType = 'spectrumImport'
def waveNumber(self, g):
"""
Calculate wave number
"""
self.k= self.w**2/g # general method of calculating k
if self.deepWaterWave == 0: # calculate k directly from specific water depth
for i in range(100):
self.k = self.w**2/g/np.tanh(self.k*self.waterDepth)
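    # Illustrative note (added for clarity; not part of the original class): the loop
    # above fixed-point iterates the finite-depth dispersion relation
    # w**2 = g*k*tanh(k*h), starting from the deep-water guess k0 = w**2/g.
    # For example, with w = 1 rad/s, g = 9.81 m/s**2 and waterDepth h = 10 m:
    #
    #     k = 1.0**2 / 9.81                      # ~0.102, deep-water guess
    #     for _ in range(100):
    #         k = 1.0**2 / 9.81 / np.tanh(k * 10.0)
    #     # k converges to ~0.122 (1/m)
    #
    # which is the same update applied above to the whole frequency array self.w.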
def readData(self,file):
"""
Return Data from the file
Supported file type: .txt, .mat
"""
if file.endswith('.txt'):
data = np.conj(np.transpose(np.loadtxt(file))) # transforms data in to array no matter it was in vector form or array form
elif file.endswith('.mat'): # specific for MATLAB data file. Allows collaboration between MATLAB user and Python user.
matFile = sio.loadmat(file)
keys = list(matFile.keys())[-1]
data = np.conj(np.transpose(matFile[keys])) # this transforms data in to array no matter it was in vector form or array form
return data
def setWavePhase(self):
"""
Sets the irregular wave's random phase
MATLAB and Python use same random number generator
multiple arrays of phase is not supported for regular wave as it was not supported in the WEC-Sim
"""
if self.phaseSeed != 0:
np.random.seed(self.phaseSeed) #Phase seed = 1,2,3,...,etc
else:
np.random.seed(np.random.shuffle(self.phaseSeed)) # shuffle phase seed
if (self.freqDisc == 'EqualEnergy') or (self.freqDisc == 'Traditional'):
self.phase = 2*np.pi*np.conj(np.transpose(np.random.rand(self.numFreq,np.size(self.waveDir)))) # for multiple wave direction, multiple arrays of phase will be made
elif (self.freqDisc == 'Imported'):
data = self.readData(self.spectrumDataFile)
if len(data) == 3: # if imported spectrum data file is correct it should have 3 rows of data
freq_data = data[0]
self.phase = np.array([[x for x,i in zip(data[2],freq_data)
if i>=min(self.bemFreq)/2/np.pi and i<=max(self.bemFreq)/2/np.pi]])
else:
self.phase = 2*np.pi*np.random.rand(1,self.numFreq) # if imported spectrum data is faulty, phase will be calculated randomly
def waveElevNowave(self,maxIt,dt):
"""
Set noWave elevation time-history
"""
t = np.arange(maxIt+1)*dt
initialZeros = | np.zeros((maxIt+1)) | numpy.zeros |
# -*- coding: utf-8 -*-
import numpy as np
import torch
import random
from torch import multiprocessing as mp
from convlab2.dialog_agent.agent import PipelineAgent
from convlab2.dialog_agent.session import BiSession
from convlab2.dialog_agent.env import Environment
from convlab2.dst.rule.multiwoz import RuleDST
from convlab2.policy.rule.multiwoz import RulePolicy
from convlab2.policy.rlmodule import Memory, Transition
from convlab2.evaluator.multiwoz_eval import MultiWozEvaluator
from pprint import pprint
import json
import matplotlib.pyplot as plt
import sys
import logging
import os
import datetime
import argparse
import os
def init_logging(log_dir_path, path_suffix=None):
if not os.path.exists(log_dir_path):
os.makedirs(log_dir_path)
current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
if path_suffix:
log_file_path = os.path.join(log_dir_path, f"{current_time}_{path_suffix}.log")
else:
log_file_path = os.path.join(log_dir_path, "{}.log".format(current_time))
stderr_handler = logging.StreamHandler()
file_handler = logging.FileHandler(log_file_path)
format_str = "%(levelname)s - %(filename)s - %(funcName)s - %(lineno)d - %(message)s"
logging.basicConfig(level=logging.DEBUG, handlers=[stderr_handler, file_handler], format=format_str)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def sampler(pid, queue, evt, env, policy, batchsz):
"""
This is a sampler function, and it will be called by multiprocess.Process to sample data from environment by multiple
processes.
:param pid: process id
:param queue: multiprocessing.Queue, to collect sampled data
:param evt: multiprocessing.Event, to keep the process alive
:param env: environment instance
:param policy: policy network, to generate action from current policy
:param batchsz: total sampled items
:return:
"""
buff = Memory()
# we need to sample batchsz of (state, action, next_state, reward, mask)
# each trajectory contains `trajectory_len` num of items, so we only need to sample
# `batchsz//trajectory_len` num of trajectory totally
# the final sampled number may be larger than batchsz.
sampled_num = 0
sampled_traj_num = 0
traj_len = 50
real_traj_len = 0
while sampled_num < batchsz:
# for each trajectory, we reset the env and get initial state
s = env.reset()
for t in range(traj_len):
# [s_dim] => [a_dim]
s_vec = torch.Tensor(policy.vector.state_vectorize(s))
a = policy.predict(s)
# interact with env
next_s, r, done = env.step(a)
# a flag indicates ending or not
mask = 0 if done else 1
            # get reward compared to demonstrations
next_s_vec = torch.Tensor(policy.vector.state_vectorize(next_s))
# save to queue
buff.push(s_vec.numpy(), policy.vector.action_vectorize(a), r, next_s_vec.numpy(), mask)
# update per step
s = next_s
real_traj_len = t
if done:
break
# this is end of one trajectory
sampled_num += real_traj_len
sampled_traj_num += 1
# t indicates the valid trajectory length
# this is end of sampling all batchsz of items.
# when sampling is over, push all buff data into queue
queue.put([pid, buff])
evt.wait()
def sample(env, policy, batchsz, process_num):
"""
    Given batchsz number of tasks, the batchsz will be split equally across the processes,
    and when the processes return, it merges all the data and returns it
:param env:
:param policy:
:param batchsz:
:param process_num:
:return: batch
"""
    # batchsz will be split across the processes,
    # the final batchsz may be larger than the batchsz parameter
process_batchsz = np.ceil(batchsz / process_num).astype(np.int32)
# buffer to save all data
queue = mp.Queue()
# start processes for pid in range(1, processnum)
# if processnum = 1, this part will be ignored.
# when save tensor in Queue, the process should keep alive till Queue.get(),
# please refer to : https://discuss.pytorch.org/t/using-torch-tensor-over-multiprocessing-queue-process-fails/2847
# however still some problem on CUDA tensors on multiprocessing queue,
# please refer to : https://discuss.pytorch.org/t/cuda-tensors-on-multiprocessing-queue/28626
# so just transform tensors into numpy, then put them into queue.
evt = mp.Event()
processes = []
for i in range(process_num):
process_args = (i, queue, evt, env, policy, process_batchsz)
processes.append(mp.Process(target=sampler, args=process_args))
for p in processes:
        # set the process as daemon, and it will be killed once the main process is stopped.
p.daemon = True
p.start()
# we need to get the first Memory object and then merge others Memory use its append function.
pid0, buff0 = queue.get()
for _ in range(1, process_num):
pid, buff_ = queue.get()
buff0.append(buff_) # merge current Memory into buff0
evt.set()
# now buff saves all the sampled data
buff = buff0
return buff.get_batch()
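# Illustrative sketch (ours; not called by this script): the `mask` stored with each
# transition in sampler() is what lets a learner compute discounted returns across
# the concatenated trajectories, since mask == 0 marks the end of a dialogue.
def _discounted_returns(rewards, masks, gamma=0.99):
    returns = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running * masks[t]
        returns[t] = running
    return returns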
def evaluate(dataset_name, model_name, load_path, calculate_reward=True):
seed = 20190827
random.seed(seed)
    np.random.seed(seed)
import sys
import math
import random
from copy import deepcopy
from itertools import combinations
from unittest.mock import patch, MagicMock
import pytest
import numpy as np
from text_rank.graph import AdjacencyList, AdjacencyMatrix, Vertex
from text_rank.text_rank import (
sum_edges,
accumulate_score,
text_rank,
text_rank_init_list,
text_rank_init_matrix,
text_rank_update_list,
text_rank_update_matrix,
text_rank_output_list,
text_rank_output_matrix,
)
from utils import rand_str
TRIALS = 100
GOLD_SEED = 1337
GOLD_WS = [
0.11241286838510592,
0.09703866532611793,
0.0665736515734767,
0.10659608608017906,
0.03015021720224241,
0.1500117991660606,
0.0698230006775505,
0.14368633275201811,
0.1677271414904536,
0.05598023734679501,
]
def test_sum_edges():
def test():
edges = {}
gold_total = random.randint(0, 100)
total = gold_total
i = 0
while total > 0:
weight = random.randint(1, 5)
weight = min(weight, total)
edges[i] = weight
i += 1
total -= weight
score = sum_edges(edges)
assert score == gold_total
for _ in range(TRIALS):
test()
def test_accumulate_score():
vertex = MagicMock(edges_in={0: 0.5, 1: 0.6, 4: 0.1})
denom = [0.1, 0.7, 0.4, 0.0, 0.3]
ws = [0.9, 0.2, 0.5, 0.1, 0.4]
gold = 4.8047619047619055
assert math.isclose(accumulate_score(vertex, ws, denom), gold)
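# Sanity check for the gold value above, assuming accumulate_score implements the
# standard TextRank sum over incoming edges of ws[j] * weight[j] / denom[j]:
#     0.9 * 0.5 / 0.1 + 0.2 * 0.6 / 0.7 + 0.4 * 0.1 / 0.3 ≈ 4.8047619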
def test_init_list_shapes():
graph = MagicMock(vertices=[MagicMock() for _ in range(random.randint(10, 100))])
ws, denom = text_rank_init_list(graph)
assert len(ws) == len(denom) == len(graph.vertices)
def test_init_list_ws_inits():
graph = MagicMock(vertices=[MagicMock() for _ in range(len(GOLD_WS))])
ws, _ = text_rank_init_list(graph, seed=GOLD_SEED)
for w, gw in zip(ws, GOLD_WS):
assert math.isclose(w, gw)
def test_init_list_d_norms():
graph = MagicMock(vertices=[MagicMock() for _ in range(random.randint(10, 100))])
gold = [random.random() for _ in range(len(graph.vertices))]
values = deepcopy(gold)
zero_indices = random.sample(range(len(gold)), len(gold) // 2)
for i in zero_indices:
gold[i] = 1
values[i] = 0
with patch("text_rank.text_rank_module.sum_edges") as sum_patch:
sum_patch.side_effect = values
_, denom = text_rank_init_list(graph)
for d, g in zip(denom, gold):
assert math.isclose(d, g)
def test_init_list_sum_to_one():
graph = MagicMock(vertices=[MagicMock() for _ in range(random.randint(10, 100))])
ws, _ = text_rank_init_list(graph)
assert math.isclose(sum(ws), 1)
def test_init_list_uniform():
graph = MagicMock(vertices=[MagicMock() for _ in range(random.randint(10, 100))])
golds = [1 for _ in graph.vertices]
norm = sum(golds)
golds = [g / norm for g in golds]
ws, _ = text_rank_init_list(graph, uniform=True)
for w, g in zip(ws, golds):
assert math.isclose(w, g)
def test_init_matrix_shapes():
verts = random.randint(10, 100)
graph = MagicMock(vertex_count=verts, adjacency_matrix=np.random.rand(verts, verts))
ws, denom = text_rank_init_matrix(graph)
assert len(ws) == len(denom) == verts
def test_init_matrix_ws_inits():
graph = MagicMock(vertex_count=len(GOLD_WS), adjacency_matrix=np.random.rand(len(GOLD_WS), len(GOLD_WS)))
ws, _ = text_rank_init_matrix(graph, seed=GOLD_SEED)
np.testing.assert_allclose(ws, np.array(GOLD_WS))
def test_init_matrix_d_norms():
graph = MagicMock(vertices=[MagicMock() for _ in range(random.randint(10, 100))])
gold = [random.random() for _ in range(len(graph.vertices))]
values = deepcopy(gold)
zero_indices = random.sample(range(len(gold)), len(gold) // 2)
for i in zero_indices:
gold[i] = 1
values[i] = 0
with patch("text_rank.text_rank_module.sum_edges") as sum_patch:
sum_patch.side_effect = values
_, denom = text_rank_init_matrix(graph)
for d, g in zip(denom, gold):
assert math.isclose(d, g)
def test_init_matrix_sum_to_one():
verts = random.randint(10, 100)
graph = MagicMock(vertex_count=verts, adjacency_matrix=np.random.rand(verts, verts))
ws, _ = text_rank_init_matrix(graph)
assert math.isclose(sum(ws), 1)
def test_init_matrix_uniform():
verts = random.randint(10, 100)
graph = MagicMock(vertex_count=verts, adjacency_matrix=np.random.rand(verts, verts))
    golds = [1 for _ in range(verts)]
    norm = sum(golds)
    golds = [g / norm for g in golds]
    ws, _ = text_rank_init_matrix(graph, uniform=True)
np.testing.assert_allclose(ws, np.array(golds))
def test_update_list():
graph = MagicMock(vertices=[MagicMock() for _ in range(random.randint(10, 100))])
damping = random.random()
golds = [random.random() for _ in graph.vertices]
update_values = [(g - 1 + damping) / damping for g in golds]
with patch("text_rank.text_rank_module.accumulate_score") as acc_patch:
acc_patch.side_effect = update_values
ws = text_rank_update_list(graph, None, None, damping)
for w, g in zip(ws, golds):
        assert math.isclose(w, g)
def test_update_matrix():
verts = np.random.randint(10, 100)
ws = np.random.rand(verts)
adj = np.random.rand(verts, verts)
denom = np.random.rand(verts, 1)
graph = MagicMock(adjacency_matrix=adj)
damping = np.random.rand()
gold = (1 - damping) + damping * np.sum(ws.reshape((-1, 1)) * (adj / denom), axis=0)
res = text_rank_update_matrix(graph, ws, denom, damping)
np.testing.assert_allclose(res, gold)
def test_output_list():
v = random.randint(2, 10)
verts = [rand_str() for _ in range(v)]
graph = MagicMock(vertices=[Vertex(v) for v in verts])
ws = np.random.rand(v)
gold_labels = [verts[i] for i in np.argsort(ws)[::-1]]
gold_score = np.sort(ws)[::-1]
gold_score = gold_score / np.sum(gold_score)
res = text_rank_output_list(graph, ws)
for gl, gs, (l, s) in zip(gold_labels, gold_score, res):
assert l == gl
assert math.isclose(s, gs)
def test_output_list_sum_to_one():
v = random.randint(2, 10)
verts = [rand_str() for _ in range(v)]
graph = MagicMock(vertices=[Vertex(v) for v in verts])
    ws = np.random.rand(v)
import cv2
import numpy as np
def add(img1, img2, xyCor):
xPos,yPos = xyCor
xBefore,yBefore=xPos,yPos
xReal,yReal=0,0
imageWidth2 = img2.shape[1]
imageHeight2 = img2.shape[0]
while xReal < imageWidth2:
while yReal < imageHeight2:
Pos1=cv2.add(int(img1[yPos,xPos,0]),int(img2[yReal,xReal,0]))
Pos2=cv2.add(int(img1[yPos,xPos,1]),int(img2[yReal,xReal,1]))
Pos3=cv2.add(int(img1[yPos,xPos,2]),int(img2[yReal,xReal,2]))
img1.itemset((yPos, xPos, 0),int(Pos1[0][0]) )
img1.itemset((yPos, xPos, 1),int(Pos2[0][0]) )
img1.itemset((yPos, xPos, 2),int(Pos3[0][0]) )
yPos = yPos + 1
yReal= yReal+1
yPos = yBefore
yReal = 0
xPos = xPos + 1
xReal = xReal +1
return img1
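# A vectorized equivalent of add() above (a sketch, not part of the original module):
# cv2.add saturates per element, so applying it to the region of interest gives the
# same result for uint8 images without the per-pixel Python loops.
def add_roi(img1, img2, xyCor):
    x, y = xyCor
    h, w = img2.shape[:2]
    img1[y:y+h, x:x+w] = cv2.add(img1[y:y+h, x:x+w], img2)
    return img1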
def subtract(img1,img2,xyCor):
xPos,yPos = xyCor
xBefore,yBefore=xPos,yPos
xReal,yReal=0,0
imageWidth2 = img2.shape[1]
imageHeight2 = img2.shape[0]
while xReal < imageWidth2:
while yReal < imageHeight2:
Pos1=cv2.subtract(int(img1[yPos,xPos,0]),int(img2[yReal,xReal,0]))
Pos2=cv2.subtract(int(img1[yPos,xPos,1]),int(img2[yReal,xReal,1]))
Pos3=cv2.subtract(int(img1[yPos,xPos,2]),int(img2[yReal,xReal,2]))
img1.itemset((yPos, xPos, 0),int(Pos1[0][0]) )
img1.itemset((yPos, xPos, 1),int(Pos2[0][0]) )
img1.itemset((yPos, xPos, 2),int(Pos3[0][0]) )
yPos = yPos + 1
yReal= yReal+1
yPos = yBefore
yReal = 0
xPos = xPos + 1
xReal = xReal +1
return img1
def multiply(img1,img2,xyCor):
xPos,yPos = xyCor
xBefore,yBefore=xPos,yPos
xReal,yReal=0,0
imageWidth2 = img2.shape[1]
imageHeight2 = img2.shape[0]
while xReal < imageWidth2:
while yReal < imageHeight2:
Pos1=cv2.multiply(int(img1[yPos,xPos,0]),int(img2[yReal,xReal,0]))
Pos2=cv2.multiply(int(img1[yPos,xPos,1]),int(img2[yReal,xReal,1]))
Pos3=cv2.multiply(int(img1[yPos,xPos,2]),int(img2[yReal,xReal,2]))
if(np.isnan(Pos1[0][0]) or np.isinf(Pos1[0][0])):
Pos1[0][0] = 0
            if(np.isnan(Pos2[0][0]) or np.isinf(Pos2[0][0])):
import numpy as np
import pybullet as p
import itertools
class Robot():
index = 0
Kf =1 # proportionality formation constant
Kt =1 # target proportionality constant
def __init__(self, init_pos, robot_id, dt):
self.id = robot_id
self.dt = dt
# self.pybullet_id = p.loadSDF("../models/robot.sdf")[0]
self.pybullet_id = p.loadURDF("../models/robot_with_4_tyres_trail_2/urdf/robot_with_4_tyres_trail_2.urdf",[0, 0, 1])
self.joint_ids = list(range(p.getNumJoints(self.pybullet_id)))
self.initial_position = init_pos
self.velocity = [0,0]
self.reset()
p_des = []
goal_pose = []
self.time = 0
self.old_theta_1 = 0
self.old_theta_2 = 0
self.old_theta_3 = 0
self.old_theta_4 = 0
# No friction between body and surface.
p.changeDynamics(self.pybullet_id, -1, lateralFriction=2, rollingFriction=0.)
# Friction between joint links and surface.
for i in range(p.getNumJoints(self.pybullet_id)):
p.changeDynamics(self.pybullet_id, i, lateralFriction=2, rollingFriction=0.)
self.messages_received = []
self.messages_to_send = []
self.neighbors = []
def reset(self):
p.resetBasePositionAndOrientation(self.pybullet_id, self.initial_position, (0., 0., 0., 1.))
def set_wheel_velocity(self, V_x,V_y,W):
"""
        Sets the steering angles and wheel speeds from the desired body velocity: V_x, V_y (linear components) and W (angular velocity).
"""
# assert len(vel) == 2, "Expect velocity to be array of size two"
# p.setJointMotorControlArray(self.pybullet_id, self.joint_ids, p.VELOCITY_CONTROL,
# targetVelocities=vel)
l = 0.23
w = 0.23
        v1_x = V_x - W*l # velocity for wheel 1, x component
        v1_y = V_y + W*w # velocity for wheel 1, y component
        v2_x = V_x + W*l # velocity for wheel 2, x component
        v2_y = V_y + W*w # velocity for wheel 2, y component
        v3_x = V_x - W*l # velocity for wheel 3, x component
        v3_y = V_y - W*w # velocity for wheel 3, y component
        v4_x = V_x + W*l # velocity for wheel 4, x component
        v4_y = V_y - W*w # velocity for wheel 4, y component
#add this delta_theta to the previous theta
delta_theta_1 = np.arctan2(v1_y,v1_x) # theta for robot 1
delta_theta_2 = np.arctan2(v2_y,v2_x) # theta for robot 2
delta_theta_3 = np.arctan2(v3_y,v3_x) # theta for robot 3
delta_theta_4 = np.arctan2(v4_y,v4_x) # theta for robot 4
v1 = np.linalg.norm([v1_x,v1_y]) # maginitude for velocity of robot 1
v2 = np.linalg.norm([v2_x,v2_y]) # maginitude for velocity of robot 2
v3 = np.linalg.norm([v3_x,v3_y]) # maginitude for velocity of robot 3
v4 = np.linalg.norm([v4_x,v4_y]) # maginitude for velocity of robot 4
theta_1 = 2.13 + delta_theta_1
theta_2 = 3.23 + delta_theta_2
theta_3 = 1.28 + delta_theta_3
theta_4 = 3.67 + delta_theta_4
p.setJointMotorControl2(bodyUniqueId = self.pybullet_id, jointIndex = 0, controlMode = p.POSITION_CONTROL,targetPosition = theta_1, force= 5)
p.setJointMotorControl2(bodyUniqueId = self.pybullet_id, jointIndex = 2, controlMode = p.POSITION_CONTROL,targetPosition = theta_2, force= 5)
p.setJointMotorControl2(bodyUniqueId = self.pybullet_id, jointIndex = 4, controlMode = p.POSITION_CONTROL,targetPosition = theta_3, force= 5)
p.setJointMotorControl2(bodyUniqueId = self.pybullet_id, jointIndex = 6, controlMode = p.POSITION_CONTROL,targetPosition = theta_4, force= 5)
p.setJointMotorControl2(bodyUniqueId = self.pybullet_id, jointIndex = 1, controlMode = p.VELOCITY_CONTROL,targetVelocity = v1, force= 100)
p.setJointMotorControl2(bodyUniqueId = self.pybullet_id, jointIndex = 3, controlMode = p.VELOCITY_CONTROL,targetVelocity = v2, force= 100)
p.setJointMotorControl2(bodyUniqueId = self.pybullet_id, jointIndex = 5, controlMode = p.VELOCITY_CONTROL,targetVelocity = v3, force= 100)
p.setJointMotorControl2(bodyUniqueId = self.pybullet_id, jointIndex = 7, controlMode = p.VELOCITY_CONTROL,targetVelocity = v4, force= 100)
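        # Note (interpretation added here, not stated in the original source): the
        # per-wheel components above follow the rigid-body relation
        # v_i = (V_x, V_y) + W x r_i with r_i = (+/-l, +/-w) the wheel offsets;
        # each wheel module is then steered to atan2(v_y, v_x) and driven at |v_i|.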
def get_pos_and_orientation(self):
"""
Returns the position and orientation (as Yaw angle) of the robot.
"""
pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)
euler = p.getEulerFromQuaternion(rot)
        return np.array(pos), euler[2]
import math
import numpy as np
from utils.env_objects import Cylinder, Cube
import os
class EnvDefs:
epuck = ('EPUCK', os.path.abspath('webots_objects/E-puck.wbo'))
cylinders = [
# node DEF, node file definition, radius
('Cylinder1', os.path.abspath('webots_objects/Cylinder1.wbo'), 0.05),
('Cylinder2', os.path.abspath('webots_objects/Cylinder2.wbo'), 0.05)
]
boxes = [
# node DEF, node file definition, side length
('Box1', os.path.abspath('webots_objects/Box1.wbo'), 0.1),
('Box2', os.path.abspath('webots_objects/Box2.wbo'), 0.1)
]
wall = ('Wall', os.path.abspath('webots_objects/Wall.wbo'))
class SimpleArena:
def __init__(self, supervisor):
self.supervisor = supervisor
# initialization helper variables
self.robot_initial_position = []
self.children_field = self.supervisor.getRoot().getField('children') # Get list of all the objects of the scene
self.robot = None
self.environment_objects = []
def reset(self):
self._remove_objects()
# Respawn robot in starting position and state
epuck_def, epuck_file = EnvDefs.epuck
self.children_field.importMFNode(-2, epuck_file) # Load robot from file and add to second-to-last position
self.robot = self.supervisor.getFromDef(epuck_def)
self._insert_robot_in_random_position()
self.environment_objects = self._populate_environment_objects()
def _remove_objects(self):
if self.robot is not None:
self.robot.remove()
for environment_object in self.environment_objects:
if environment_object.webot_object:
environment_object.webot_object.remove()
def get_robot(self):
return self.robot
def _populate_environment_objects(self):
environment_objects = []
for node_def, node_file, radius in EnvDefs.cylinders:
wrapped_object = Cylinder(node_def, node_file, radius=radius)
self._place_object_in_random_position(environment_objects, wrapped_object)
environment_objects.append(wrapped_object)
for node_def, node_file, side_length in EnvDefs.boxes:
wrapped_object = Cube(node_def, node_file, side_length=side_length)
self._place_object_in_random_position(environment_objects, wrapped_object)
environment_objects.append(wrapped_object)
return environment_objects
def _place_object_in_random_position(self, placed_objects, wrapped_object):
"""
        Places the shape passed as a parameter at a random valid position
        within the parent node's environment.
:param wrapped_object: the wrapped object with utility functions that is to be placed
:param placed_objects: the objects that have already been placed
:return: the node corresponding to the shape
"""
self.children_field.importMFNode(-1, wrapped_object.node_file)
shape = self.supervisor.getFromDef(wrapped_object.node_def)
wrapped_object.webot_object = shape
x, z = self._generate_random_valid_position(placed_objects, wrapped_object)
trans_field = shape.getField('translation')
initial_position = [x, 0.05, z]
wrapped_object.initial_position = initial_position
trans_field.setSFVec3f(initial_position)
shape.resetPhysics()
return wrapped_object
def _generate_random_valid_position(self, placed_objects, wrapped_object):
valid_position_found = False
min_distance_from_wall = wrapped_object.get_min_distance_from_wall()
position_x = None
position_z = None
while not valid_position_found:
position_x, position_z = self._get_random_coords_in_arena(min_distance_from_wall)
if self._intersects_with_robot(position_x, position_z):
continue
valid_position_found = True
for placed_object in placed_objects:
if placed_object.is_inside_object(position_x, position_z, wrapped_object.get_min_distance_from_wall()):
valid_position_found = False
continue
return position_x, position_z
@staticmethod
def _get_random_coords_in_arena(min_distance_from_wall):
floor_x = 1 / 2
floor_z = 1 / 2
position_x = np.random.uniform(-floor_x + min_distance_from_wall, floor_x - min_distance_from_wall)
position_z = np.random.uniform(-floor_z + min_distance_from_wall, floor_z - min_distance_from_wall)
return position_x, position_z
def _intersects_with_robot(self, position_x, position_z):
position_vec = self.robot_initial_position
return np.sqrt(((position_vec[0] - position_x) ** 2) + ((position_vec[2] - position_z) ** 2)) < 0.1
def _insert_robot_in_random_position(self):
trans_field = self.robot.getField('translation')
x, z = self._get_random_coords_in_arena(0.045)
self.robot_initial_position = [x, 0.01, z]
trans_field.setSFVec3f(self.robot_initial_position)
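# Example usage (a sketch; `supervisor` is the Webots Supervisor instance provided by
# the controller runtime):
#     arena = SimpleArena(supervisor)
#     arena.reset()
#     robot_node = arena.get_robot()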
class Maze:
def __init__(self, supervisor):
self.supervisor = supervisor
# initialization helper variables
self.children_field = self.supervisor.getRoot().getField('children') # Get list of all the objects of the scene
self.robot = None
self.arena_size = np.array(self.supervisor.getFromDef('arena').getField('floorSize').getSFVec2f())
self.tile_size = np.array([0.25, 0.25])
self.walls = self._create_walls()
def reset(self):
self._respawn_robot()
self._create_maze()
def _respawn_robot(self):
if self.robot is not None:
self.robot.remove()
epuck_def, epuck_file = EnvDefs.epuck
self.children_field.importMFNode(-2, epuck_file) # Load robot from file and add to second-to-last position
self.robot = self.supervisor.getFromDef(epuck_def)
self._insert_robot_in_initial_position()
def _create_walls(self):
wall_def, wall_file = EnvDefs.wall
walls = []
for i in range(int((self.arena_size[0] / 0.25 + 1) * (self.arena_size[1] / 0.25 + 1))):
self.children_field.importMFNode(0, wall_file)
wb_object = self.supervisor.getFromDef(wall_def)
self._set_object_position(wb_object, 10 + i*0.1, 0)
walls.append(wb_object)
return walls
def _create_maze(self):
shape = np.ceil(self.arena_size / self.tile_size).astype(int)
        h_walls = np.ones(shape - np.array([1, 0]))
import tensorflow as tf
import scipy.sparse as sp
import numpy as np
import GCN
import utils
class AFGSM:
def __init__(self, A, X, Z, num_vicious_nodes, num_vicious_edges, dmin=1):
"""
:param A: sparse matrix, the adjacency matrix ,[n X n]
:param X: sparse or dense matrix, the feature matrix ,[n x d], d is the dimension of features
:param Z: sparse matrix, the labels, [n x c], c is the dimension of one-hot label
:param num_vicious_nodes: int, the number of vicious nodes
        :param num_vicious_edges: int, the number of vicious edges
:param dmin: int, min degree assigned for vicious nodes
"""
self.A = A.tocsr()
self.A_orig = self.A.copy()
self.A.setdiag(0)
self.X = X
self.Z = Z
self.labels = list(np.squeeze(np.argmax(self.Z, axis=1)))
self.num_vicious_nodes = num_vicious_nodes
self.num_vicious_edges = num_vicious_edges
self.An = utils.preprocess_graph(self.A)
self.degree = np.squeeze(self.A.sum(axis=1).getA()) + 1
self.old_degree = np.squeeze(self.A.sum(axis=1).getA()) + 1
if sp.issparse(self.X):
self.cooc_X = sp.csr_matrix(self.X.T.dot(self.X))
self.cooc_X[self.cooc_X.nonzero()] = 1
self.X_d = int(np.sum(X) / self.X.shape[0])
else:
self.cooc_X = None
self.X_d = None
self.D_inv = sp.diags(1 / self.degree)
self.D_inv_sqrt = sp.diags(1 / np.sqrt(self.degree))
self.dv = self.get_random_degrees(dmin)
def get_random_degrees(self, dmin=1):
"""
assign degrees of vicious nodes randomly
:param dmin: min degree assigned for vicious nodes
:return: a numpy array contains the degrees of vicious nodes
"""
dv = np.zeros(self.num_vicious_nodes,dtype=int) + dmin
for _ in range(int(self.num_vicious_edges-dmin*self.num_vicious_nodes)):
dv[np.random.randint(0, self.num_vicious_nodes)] += 1
return dv
def cal_loss(self, logits_attacked, idx):
best_wrong_label = np.argmax(logits_attacked[idx, :] - 1000000 * self.Z[idx, :])
true_label = np.argmax(self.Z[idx, :])
loss_ = logits_attacked[idx, true_label] - logits_attacked[idx, best_wrong_label]
return loss_
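        # Note: this is a margin-style loss on the logits (true-class logit minus the
        # best wrong-class logit); the attack on node `idx` succeeds once it becomes
        # negative (interpretation added here, not stated in the original source).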
def classification_margins(self, probs, idx):
true_label = np.argmax(self.Z[idx, :])
best_wrong_label = np.argmax(probs[0, :] - 1000 * self.Z[idx, :])
return -probs[0, best_wrong_label] + probs[0, true_label]
def update_M(self, M1, M2, w, idx,i, direct_attack):
"""
update the intermediate quantity for calculation of gradients
"""
nodes_changed = self.A[-1, 0:-1].nonzero()[1]
if direct_attack:
M1 = M1 * np.sqrt(self.dv[i-1] / self.dv[i])
M1[nodes_changed] = M1[nodes_changed] / np.sqrt(self.degree[nodes_changed]) * np.sqrt(self.old_degree[nodes_changed])
M1_1 = (1 / np.sqrt(self.dv[i]*self.degree[-1]) * self.X[-1, :]).dot(w)
M1_ = np.zeros(M1.shape[0] + 1)
M1_[0:-1] = M1
M1_[-1] = M1_1
else:
M1_ = None
M2[nodes_changed] = M2[nodes_changed] * self.degree[nodes_changed] / self.old_degree[nodes_changed]
M2_1 = (self.A[-1, idx])/self.degree[-1]
        M2_ = np.zeros(M2.shape[0]+1)
import operator
import threading
import functools
import itertools
import contextlib
import collections
import numpy as np
from ..autoray import (
get_lib_fn,
infer_backend,
get_dtype_name,
register_function,
astype,
)
_EMPTY_DICT = {}
class LazyArray:
"""A lazy array representing a shaped node in a computational graph.
"""
__slots__ = (
"_backend",
"_fn",
"_args",
"_kwargs",
"_shape",
"_dtype",
"_data",
"_deps",
)
def __init__(
self, backend, fn, args, kwargs, shape, dtype, deps=None,
):
# info required to perform the computation
self._backend = backend
self._fn = fn
self._args = args
if kwargs is None:
self._kwargs = _EMPTY_DICT
else:
self._kwargs = kwargs
# resulting array information
self._shape = shape
self._dtype = dtype
self._data = None
# lazy arrays this ``LazyArray`` depends on
if deps is None:
# automatically find them
self._deps = (*find_lazy(self._args), *find_lazy(self._kwargs))
else:
# manually specified (more efficient)
self._deps = deps
@classmethod
def from_data(cls, data):
"""Create a new ``LazyArray`` directly from a concrete array.
"""
obj = cls.__new__(cls)
obj._backend = infer_backend(data)
obj._fn = obj._args = obj._kwargs = None
obj._shape = tuple(map(int, data.shape))
obj._dtype = get_dtype_name(data)
obj._data = data
obj._deps = ()
return obj
@classmethod
def from_shape(cls, shape, backend='numpy', dtype=None):
"""Create a new ``LazyArray`` with a given shape.
"""
obj = cls.__new__(cls)
obj._backend = backend
obj._fn = obj._args = obj._kwargs = None
obj._shape = tuple(map(int, shape))
obj._dtype = dtype
obj._data = '__PLACEHOLDER__'
obj._deps = ()
return obj
def to(
self,
fn,
args,
kwargs=None,
backend=None,
shape=None,
dtype=None,
deps=None,
):
"""Create a new ``LazyArray``, by default propagating backend, shape,
        dtype and deps from the current LazyArray.
"""
return LazyArray(
fn=fn,
args=args,
kwargs=kwargs,
backend=backend if backend is not None else self._backend,
shape=shape if shape is not None else self.shape,
dtype=dtype if dtype is not None else self.dtype,
deps=deps if deps is not None else (self,),
)
def _materialize(self):
"""Recursively compute all required args and kwargs for this node
before computing itself and dereferencing dependencies. Note using this
to materialize a large computation from scratch should be avoided due
to the recursion limit, use ``x.compute()`` instead.
"""
if self._data is None:
# materialize any actual array args
args = (maybe_materialize(x) for x in self._args)
kwargs = {k: maybe_materialize(v) for k, v in self._kwargs.items()}
self._data = self._fn(*args, **kwargs)
# free any references to deps
self._fn = self._args = self._kwargs = None
self._deps = ()
return self._data
def __iter__(self):
"""Generate each unique computational node. Use ``ascend`` if you need
to visit children before parents.
"""
seen = set()
queue = [self]
queue_pop = queue.pop
queue_extend = queue.extend
seen_add = seen.add
while queue:
node = queue_pop()
nid = id(node)
if nid not in seen:
yield node
queue_extend(node._deps)
seen_add(nid)
def ascend(self):
"""Generate each unique computational node, from leaves to root.
"""
seen = set()
ready = set()
queue = [self]
queue_extend = queue.extend
queue_pop = queue.pop
ready_add = ready.add
seen_add = seen.add
while queue:
node = queue[-1]
need_to_visit = [c for c in node._deps if id(c) not in ready]
if need_to_visit:
queue_extend(need_to_visit)
else:
node = queue_pop()
nid = id(node)
ready_add(nid)
if nid not in seen:
yield node
seen_add(nid)
def compute(self):
"""Compute the value of this lazy array.
Unlike ``self._materialize()`` this avoids deep recursion.
"""
for node in self.ascend():
node._materialize()
return self._data
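        # Example (a sketch; it builds a two-node graph by hand via ``to``):
        #     x = LazyArray.from_data(np.ones((2, 2)))
        #     y = LazyArray.from_data(np.eye(2))
        #     z = x.to(np.add, (x, y), deps=(x, y))
        #     z.compute()  # -> array([[2., 1.], [1., 2.]])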
def compute_constants(self, variables):
"""Fold constant arrays - everything not dependent on ``variables`` -
into the graph.
"""
if isinstance(variables, LazyArray):
variables = (variables,)
variables = set(variables)
# must ascend
for node in self.ascend():
if not any(c in variables for c in node._deps):
# can fold
node._materialize()
else:
# mark as variable
variables.add(node)
def as_string(self, params):
"""Create a string which evaluates to the lazy array creation.
"""
# name function and store in locals
fn_name = f"{getattr(self._fn, '__name__', 'fn')}{id(self._fn)}"
params.setdefault(fn_name, self._fn)
# string of args and kwargs
str_call = ", ".join(
itertools.chain(
(stringify(x, params) for x in self._args),
(
f"{k}: {stringify(v, params)}"
for k, v in self._kwargs.items()
),
)
)
# assign function call to new variable
return f"x{id(self)} = {fn_name}({str_call})"
def get_source(self, params=None):
"""Write the source code of an unravelled version of the computational
graph, injecting required runtime objects into ``params``.
"""
if params is None:
# locals space mapping LazyArray names to values
params = {}
delete_checked = set()
s = [] # source code lines
for node in reversed(tuple(self.ascend())):
# when *descending*, the first encounter of a node is the
# *last* time it is referenced in forward pass -> delete,
# need to do this for GC since running in single big function
for c in node._deps:
if c not in delete_checked:
if c._deps:
# is an intermediate - safe to delete
s.append(f"del x{id(c)}")
delete_checked.add(c)
if node._data is None:
# create the array via computation
s.append(node.as_string(params))
else:
# inject the already computed data as constant
params[f"x{id(node)}"] = node._data
# reverse (ascend) into source code
return "\n".join(reversed(s))
def get_compiled(self, optimize=1):
"""Compile the function into a code object using ``compile``,
returning a wrapper that executes it using ``exec`` and the 'locals'
dict specifiying inputs which can be modified. It should be called
like:
fn, params = x.get_compiled()
# modify params e.g. inject new arrays here before call
...
fn(params)
"""
# write source and populate locals mapping that function will run under
params = {}
source = self.get_source(params)
# compile source
code = compile(source, f"code{id(self)}", "exec", optimize=optimize)
compiled = functools.partial(
_code_exec_fn, code=code, out_name=f"x{id(self)}"
)
# need both function and locals mapping to run it with / modify args
return compiled, params
def get_function(self, variables, fold_constants=True):
"""Get a compiled function that computes ``fn(arrays)``, with ``fn``
describing the computational graph of this ``LazyArray`` and ``arrays``
corresponding to the downstream ``LazyArray`` nodes ``variables``.
Parameters
----------
variables : sequence of LazyArray
Input nodes whose data can change between calls.
fold_constants : bool, optional
Compute all intermediates which do not depend on ``variables``
prior to compilation.
Returns
-------
fn : callable
Function with signature ``fn(arrays)``.
"""
if fold_constants:
self.compute_constants(variables=variables)
var_names = tuple(f"x{id(v)}" for v in variables)
fn, params = self.get_compiled()
return functools.partial(
_array_fn, var_names=var_names, params=params, fn=fn
)
def history_max_size(self):
"""Get the largest single tensor size appearing in this computation.
"""
return max(node.size for node in self)
def history_size_footprint(self):
"""Get the combined size of intermediates at each step of the
computation. Note this assumes that intermediates are immediately
garbage collected when they are no longer required.
"""
delete_checked = set()
sizes = []
for node in reversed(tuple(self.ascend())):
for c in node._deps:
if c not in delete_checked:
# last time a dependency is seen, subtract the size
if c._deps:
sizes.append(-c.size)
delete_checked.add(c)
if node._data is None:
# this is a new intermediate, add the size
sizes.append(+node.size)
sizes.reverse()
return list(itertools.accumulate(sizes))
def history_peak_size(self):
"""Get the peak combined intermediate size of this computation.
"""
return max(self.history_size_footprint())
def history_total_size(self):
"""The the total size of all unique arrays in the computational graph,
possibly relevant e.g. for back-propagation algorithms.
"""
return sum(node.size for node in self)
def plot_history_size_footprint(
self,
log=None,
figsize=(8, 2),
color='purple',
alpha=0.5,
ax=None,
return_fig=False,
):
"""Plot the memory footprint throughout this computation.
Parameters
----------
log : None or int, optional
If not None, display the sizes in base ``log``.
figsize : tuple, optional
Size of the figure.
color : str, optional
Color of the line.
alpha : float, optional
Alpha of the line.
ax : matplotlib.axes.Axes, optional
Axes to plot on, will be created if not provided.
return_fig : bool, optional
If True, return the figure object, else just show and close it.
"""
import matplotlib.pyplot as plt
y = np.array(self.history_size_footprint())
if log:
y = np.log2(y) / np.log2(log)
            ylabel = f'$\\log_{{{log}}}[SIZE]$'
else:
ylabel = 'SIZE'
x = np.arange(y.size)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
ax.fill_between(x, 0, y, alpha=alpha, color=color)
if fig is not None:
ax.grid(True, c=(0.95, 0.95, 0.95), which='both')
ax.set_axisbelow(True)
            ax.set_xlim(0, np.max(x))
# -- coding: utf-8 --
# Copyright 2018 <NAME> <<EMAIL>>
"""
Library to handle SPM data.
This is the core module of all images retrieved by SPM and ToF-SIMS.
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.ndimage
import scipy.optimize
import skimage
import skimage.exposure
import skimage.filters
import scipy.interpolate
from skimage import transform as tf
import copy
from .utils import CDF, funit
import sys
import matplotlib as mpl
import warnings
from .utils.misc import PB
try:
from skimage.filters import threshold_local
except:
# For compatibility with old versions of skimage
from skimage.filters import threshold_adaptive as threshold_local
class SPM_image:
"""
Main class to handle SPM images.
This class contains the pixels data of the images as well as it's real size.
It also provides a lot of tools to correct and perform various analysis and tasks on the image.
"""
def __init__(self, BIN, channel='Topography',
corr=None, real=None, zscale='?', _type='Unknown'):
"""
Create a new SPM_image
Parameters
----------
BIN : 2D numpy array
The pixel values of the image as a 2D numpy array
channel : string
            The name of the channel. What does the image represent?
corr : string or None
'slope' : correct the SPM image for its slope (see pySPM.SPM.SPM_image.correct_slope)
'lines' : correct the SPM image for its lines (see pySPM.SPM.SPM_image.correct_lines)
'plane' : correct the SPM image by plane fitting (see pySPM.SPM.SPM_image.correct_plane)
real : None or dictionary
Information about the real size of the image {'x':width,'y':height,'unit':unit_name}
zscale : string
Unit used to describe the z-scale. (units of the data of BIN)
_type : string
represent the type of measurement
"""
self.channel = channel
self.direction = 'Unknown'
self.size = {'pixels': {'x': BIN.shape[1], 'y': BIN.shape[0]}}
if not real is None:
self.size['real'] = real
else:
self.size['real'] = {'unit': 'pixels',
'x': BIN.shape[1], 'y': BIN.shape[0]}
if not 'unit' in self.size['real']:
self.size['real']['unit'] = 'px'
self.pixels = BIN
self.type = _type
self.zscale = zscale
if corr is not None:
if corr.lower() == 'slope':
self.correct_slope()
elif corr.lower() == 'lines':
self.correct_lines()
elif corr.lower() == 'plane':
self.correct_plane()
def __add__(self, b):
"""
        Add up two images. This is a low level function and no check is performed to verify that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels += b.pixels
New.channel += " + "+b.channel
elif type(b) in [int, float]:
New.pixels += b
New.channels += " + {:.2f}".format(b)
return New
def __sub__(self, b):
"""
        Subtract two images. This is a low level function and no check is performed to verify that both images have the same size.
"""
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels -= b.pixels
New.channel += " - "+b.channel
elif type(b) in [int, float]:
New.pixels -= b
New.channels += " - {:.2f}".format(b)
return New
def __mul__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels *= b.pixels
New.channel = "({})*{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels *= b
New.channels = "({})*{:.2f}".format(New.channel,b)
return New
def __div__(self, b):
New = copy.deepcopy(self)
if isinstance(b, SPM_image):
New.pixels /= b.pixels
New.channel = "({})/{}".format(New.channel,b.channel)
elif type(b) in [int, float]:
New.pixels /= b
New.channels = "({})/{:.2f}".format(New.channel,b)
return New
def pxs(self):
"""
Return the pixel size
"""
fxy = {xy: funit(self.size['real'][xy], self.size['real']['unit']) for xy in 'xy'}
return [(fxy[xy]['value']/self.size['pixels'][xy], fxy[xy]['unit']) for xy in 'xy']
def add_scale(self, length, ax=None, height=20, margin=5, color='w', loc=4, text=True, pixels=None, fontsize=20, edge_color='k', edge_width=3):
"""
Display a scale marker on an existing image
Parameters
----------
length : float
The length of the scale in real units
ax : matplotlib axis
if None the current axis will be taken (plt.gca())
height : int
The height of the scale bar in pixels
color : string
The color used to display the scale bar
loc : int
The location of the scale bar.
1 : top right
2 : top left
3 : bottom left
4 : bottom right
text : bool
display the size of the scale on top of it?
pixels : bool
Is the image plotted in ax with a x/y scale in pixels?
fontsize : float
The fontsize used to display the text
Example
-------
>>> img = pySPM.SPM_image()
>>> img.show()
>>> img.add_scale(50e-6, pixels=False);
Add a scale of 50 μm on an image displayed with real units
>>> img = pySPM.SPM_image()
>>> img.show(pixels=True)
>>> img.add_scale(50e-6);
Add a scale of 50 μm on an image displayed in pixels
"""
import matplotlib.patches
import matplotlib.patheffects as PathEffects
fL = length/self.size['real']['x']
L = self.size['pixels']['x']*fL
fH = height/self.size['pixels']['y']
if ax is None:
ax = plt.gca()
if pixels is None:
if hasattr(ax, 'isPixel'):
pixels = ax.isPixel
else:
pixels = False
flipped = False
if hasattr(ax, 'flipped'):
flipped = ax.flipped
if type(loc) is int:
assert loc in [1, 2, 3, 4]
ref = ax.transAxes.transform({1:(1-fL,0),2:(0,0),3:(0,1-fH),4:(1-fL,1-fH)}[loc])
if loc in [2,3]:
ref[0] += margin
else:
ref[0] -= margin
if loc in [1,2]:
ref[1] += margin
else:
ref[1] -= margin
else:
assert type(loc) in [tuple, list]
assert len(loc)==2
ref = ax.transData.transform(loc) + ax.transAxes.transform((-fL/2,-fH/2)) - ax.transAxes.transform((0,0))
inv = ax.transData.inverted()
ref = inv.transform(ref)
WH = inv.transform(ax.transAxes.transform((fL,fH)))-inv.transform(ax.transAxes.transform((0,0)))
rect = ax.add_patch(matplotlib.patches.Rectangle(ref, width=WH[0], height=WH[1], color=color))
if text:
r = funit(length, self.size['real']['unit'])
if r['unit'][0] == 'u':
r['unit'] = '$\\mu$' + r['unit'][1:]
if loc in [3,4]:
label_ref = [ref[0]+WH[0]/2, ref[1]]
ann = ax.annotate("{value:.01f} {unit}".format(**r),
label_ref, color=color,
fontsize=fontsize, va="top", ha="center")
else:
label_ref = [ref[0]+WH[0]/2, ref[1]+WH[1]]
ann = ax.annotate("{value:.01f} {unit}".format(**r),
label_ref, color=color,
fontsize=fontsize, va="bottom", ha="center")
ann.set_path_effects([PathEffects.withStroke(linewidth=edge_width, foreground=edge_color)])
def offset(self, profiles, width=1, ax=None, col='w', inline=True, **kargs):
"""
        Correct an image by offsetting each row individually so that the lines passed as argument in "profiles" become flat.
Parameters
----------
profiles: list of list
each sublist represent a line as [x1, y1, x2, y2] in pixels known to be flat
width : int, float
the line width in pixels used for better statistics
ax : matplotlib axis or None
If not None, axis in which the profiles will be plotted in
inline : bool
If True perform the correction on the current object, otherwise return a new image
col : string
matrplotlib color used to plot the profiles (if ax is not None)
labels : bool
display a label number with each profile
**kargs: arguments passed further to get_row_profile.
            axPixels: set to True if your axis "ax" has the data plotted in pixels instead of real distance
Example
-------
        Example if the data are plotted in pixels:
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topoC = topo.offset([[150, 0, 220, 255]], inline=False,axPixels=True)
>>> topo.show(pixels=True, ax=ax[0])
>>> topoC.show(ax=ax[1]);
Example if the data are plotted with real units
>>> topo = pySPM.SPM_image(...)
>>> fig, ax = plt.subplots(1, 2, figsize=(10, 5))
>>> topoC = topo.offset([[150, 0, 220, 255]], inline=False)
>>> topo.show(ax=ax[0])
>>> topoC.show(ax=ax[1]);
"""
offset = np.zeros(self.pixels.shape[0])
counts = np.zeros(self.pixels.shape[0])
for i, p in enumerate(profiles):
if kargs.get('labels', False):
y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, label=str(i), **kargs)
else:
y, D = self.get_row_profile(*p, width=width, ax=ax, col=col, **kargs)
counts[y] += 1
offset[y[1:]] += np.diff(D)
counts[counts == 0] = 1
offset = offset/counts
offset = np.cumsum(offset)
offset = offset.reshape((self.pixels.shape[0], 1))
if inline:
self.pixels = self.pixels - \
np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
return self
else:
C = copy.deepcopy(self)
C.pixels = self.pixels - \
np.flipud(np.repeat(offset, self.pixels.shape[1], axis=1))
return C
def pxRect2Real(self, xy, width, height):
"""
        Transform (xy, width, height) data given in pixels to the equivalent values in real units
"""
ll = self.px2real(xy[0],xy[1])
ur = self.px2real(xy[0]+width,xy[1]+height)
return ll,ur[0]-ll[0],ur[1]-ll[1]
def get_row_profile(self, x1, y1, x2, y2, width=1, col='C1', ax=None, alpha=0, **kargs):
"""
Get a profile per row along a given line. This function is mainly useful for the function offset.
x1, y1, x2, y2: int
coordinates of the line.
width : int
the width of the line used for statistics (in pixels)
col: string
color used to plot the line position
ax : matplotlib axis
            axis in which the line positions will be plotted
alpha : float
The alpha channel of the line color (≥0 and ≤1)
**kargs:
line style arguments: linewidth, color and linestyle
axis units: axPixels set to True if ax has the image plotted in pixels.
Returns
-------
Y coordinates : 1D numpy array
distance along the profile starting at 0
Z coordinates : 1D numpy array
profile
"""
plotargs = { key: kargs[key] for key in ['linewidth', 'color', 'linestyle'] if key in kargs }
if y2 < y1:
x1, y1, x2, y2 = x2, y2, x1, y1
if ax is not None:
d = np.sqrt((x2-x1)**2+(y2-y1)**2)
dx = -width/2*(y2-y1)/d
dy = width/2*(x2-x1)/d
if kargs.get('axPixels', False):
ax.plot([x1-dx, x1+dx], [y1-dy, y1+dy], col)
ax.plot([x2-dx, x2+dx], [y2-dy, y2+dy], col)
ax.plot((x1, x2), (y1, y2), col, **plotargs)
if kargs.get('label', False):
ax.annotate(kargs.get('label'), (.5*(x1+x2),.5*(y1+y2)), color=col)
if alpha>0:
import matplotlib.patches
ax.add_patch(matplotlib.patches.Rectangle((x1+dx,y1+dy),width, d, -np.degrees(np.arctan2(x2-x1,y2-y1)), color=col, alpha=alpha))
else:
h = self.pixels.shape[0]
pxs = self.size['real']['x'] / self.pixels.shape[1]
pys = self.size['real']['y'] / h
ax.plot([(x1-dx)*pxs, (x1+dx)*pxs], [(h-(y1-dy))*pys, (h-(y1+dy))*pys], col)
ax.plot([(x2-dx)*pxs, (x2+dx)*pxs], [(h-(y2-dy))*pys, (h-(y2+dy))*pys], col)
ax.plot((x1*pxs, x2*pxs), ((h-y1)*pys, (h-y2)*pys), col, **plotargs)
if kargs.get('label', False):
ax.annotate(kargs.get('label'), (.5*(x1+x2)*pxs,.5*(2*h-y1-y2)*pys), color=col)
if alpha>0:
import matplotlib.patches
W = np.sqrt((2*dx*pxs)**2+(2*dy*pys)**2)
L = np.sqrt(((x2-x1)*pxs)**2+((y2-y1)*pys)**2)
ax.add_patch(matplotlib.patches.Rectangle(((x1+dx)*pxs,(y1+dy)*pys), W, L, -np.degrees(np.arctan2((x2-x1)*pxs,(y2-y1)*pys)), color=col, alpha=alpha))
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
I = scipy.interpolate.interp2d(x, y, np.flipud(self.pixels))
Y = np.arange(y1, y2+1)
V = np.zeros(len(Y))
for w in np.arange(width):
xl = np.linspace(x1-(width-1)/2.+w, x2-(width-1)/2.+w, len(Y))
for i in range(len(Y)):
Z = I(xl[i], Y[i])
V[i] += Z
return Y, V/width
def correct_median_diff(self, inline=True):
"""
Correct the image with the median difference
"""
N = self.pixels
# Difference of the pixel between two consecutive row
N2 = np.vstack([N[1:, :], N[-1:, :]])-N
# Take the median of the difference and cumsum them
C = np.cumsum(np.median(N2, axis=1))
# Extend the vector to a matrix (row copy)
D = np.tile(C, (N.shape[0], 1)).T
if inline:
self.pixels = N-D
else:
New = copy.deepcopy(self)
New.pixels = N-D
return New
def correct_slope(self, inline=True):
"""
Correct the image by subtracting a fitted slope along the y-axis
"""
s = np.mean(self.pixels, axis=1)
i = np.arange(len(s))
fit = np.polyfit(i, s, 1)
if inline:
self.pixels -= np.tile(np.polyval(fit, i).reshape(len(i), 1), len(i))
return self
else:
New = copy.deepcopy(self)
New.pixels -= np.tile(np.polyval(fit, i).reshape(len(i), 1), len(i))
return New
def correct_plane(self, inline=True, mask=None):
"""
Correct the image by subtracting a fitted 2D-plane on the data
Parameters
----------
inline : bool
If True the data of the current image will be updated otherwise a new image is created
mask : None or 2D numpy array
If not None define on which pixels the data should be taken.
"""
x = np.arange(self.pixels.shape[1])
y = np.arange(self.pixels.shape[0])
X0, Y0 = np.meshgrid(x, y)
Z0 = self.pixels
if mask is not None:
X = X0[mask]
Y = Y0[mask]
Z = Z0[mask]
else:
X = X0
Y = Y0
Z = Z0
A = np.column_stack((np.ones(Z.ravel().size), X.ravel(), Y.ravel()))
c, resid, rank, sigma = np.linalg.lstsq(A, Z.ravel(), rcond=-1)
if inline:
self.pixels -= c[0] * \
np.ones(self.pixels.shape) + c[1] * X0 + c[2] * Y0
return self
else:
New = copy.deepcopy(self)
New.pixels -= c[0]*np.ones(self.pixels.shape) + c[1] * X0 + c[2] * Y0
return New
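        # The plane is obtained by least-squares fitting z = c0 + c1*x + c2*y to the
        # (optionally masked) pixels and subtracting it from the image.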
def correct_lines(self, inline=True):
"""
        Subtract the average of each line from the image.
        If inline is True the current data are updated, otherwise a new image with the corrected data is returned.
"""
if inline:
self.pixels -= np.tile(np.mean(self.pixels, axis=1).T, (self.pixels.shape[0], 1)).T
return self
else:
New = copy.deepcopy(self)
New.pixels -= np.tile(np.mean(self.pixels, axis=1).T, (self.pixels.shape[0], 1)).T
return New
def dist_v2(self, pixel=False):
"""
Return a 2D array with the distance between each pixel and the closest border.
        Might be useful for FFT filtering.
"""
if pixel:
dx = 1
dy = 1
else:
dx = self.size['real']['x']/self.size['pixels']['x']
dy = self.size['real']['y']/self.size['pixels']['y']
x2 = np.arange(self.size['pixels']['x'])
x2 = (np.minimum(x2, self.size['pixels']['x']-x2) * dx)**2
y2 = np.arange(self.size['pixels']['y'])
y2 = (np.minimum(y2, self.size['pixels']['y'] - y2) * dy)**2
X, Y = np.meshgrid(x2, y2)
return np.sqrt(X+Y)
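        # Example (a sketch, not part of the class): a Gaussian low-pass FFT filter
        # built from dist_v2; `sigma` is in real units (or pixels if pixel=True).
        #     k = img.dist_v2()
        #     filtered = np.real(np.fft.ifft2(np.fft.fft2(img.pixels) * np.exp(-(k / sigma)**2)))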
def inv_calc_flat(self, d, l=0.1):
"""
        Function used for inverse MFM calculation (inspired by http://qmfm.empa.ch/qmfm/)
        The function is in an early development stage and is not yet used by the developer.
Parameters
----------
d : float
Height distance in the input data
l : float
Tikhonov parameter for the deconvolution
"""
work_image = self.pixels
ny, nx = self.pixels.shape
dx = self.size['real']['x']/self.size['pixels']['x']
dy = self.size['real']['y']/self.size['pixels']['y']
k = self.dist_v2()
k[0, 0] = 1e-10
tf = np.exp(-d*k)
tf[0, 0] = np.mean(tf)
tf /= 2
tf *= 1-np.exp(-d * k)
recon_tf = np.ones(tf.shape) / (tf+l*np.ones(tf.shape) / np.conj(tf))
tf *= recon_tf
return np.real(np.fft.ifft2(np.fft.fft2(work_image)*recon_tf))
def get_extent(self):
"""
Get the image extent in real data
"""
if 'recorded' in self.size:
W = self.size['recorded']['real']['x']
H = self.size['recorded']['real']['y']
else:
W = self.size['real']['x']
H = self.size['real']['y']
return (0, W, 0, H)
def show(self, ax=None, sig=None, cmap=None, title=None,
adaptive=False, dmin=0, dmax=0, pixels=False, flip=False, wrap=None, mul=1, symmetric=False, **kargs):
"""
Function to display the image with a lot of parametrization
Parameters
----------
ax : matplotlib axis or None
matplotlib axis if given otherwise current axis will be used (plt.gca())
sig : float
sigma values to adjust the contrast range around the mean ±sig times the standard-deviation
cmap : string
colormap name used. By default a gray map is used. If the zscale of the data are in 'meter' (i.e. topography data) the 'hot' colormap is used
title : string
The title of the plot. By default is the channel name
adaptive : bool
            The color scale used is linear. If adaptive is True, adaptive histogram equalization is applied so that each color covers roughly the same number of pixels.
dmin : float
minimum value adjustment used for the colorscale
dmax: float
maximum value adjustment used for the colorscale
pixels : bool
Display the image with x/y-labels with real unit. If pixels is True, the axes are in pixels
flip : bool
Flip the image upside-down
        wrap : None or int
            wrap the title to a width of `wrap` characters
symmetric : bool
            If True, the middle of the colorscale is placed at the value 0.
            This is especially useful for diverging colormaps such as BrBG, bwr, coolwarm, seismic, Spectral, etc.
level : float
level should be ≥0 and <50. Adjust the lower and upper colorscale to level% and (100-level)% of the data range.
e.g. if level=1, the colorscale will display 1-99% of the data range
vmin : float
Minimum value used for the colorscale
        vmax : float
Maximum value used for the colorscale
Returns
-------
matplotlib.image.AxesImage
matplolib axis instance returned by imshow
Examples
--------
>>> topo = pySPM.SPM_image(...)
>>> fig, (ax, ax2) = plt.subplots(2, 3, figsize=(15, 10))
>>> topo.show(ax=ax[0], cmap='gray', title="color map=\"gray\"")
>>> topo.show(ax=ax[1], sig=2, title="standard deviation=2")
>>> topo.show(ax=ax[2], adaptive=True, title="Adaptive colormap")
>>> topo.show(ax=ax2[0], dmin=4e-8, cmap='gray', title="raise the lowest value for the colormap of +40nm")
>>> topo.show(ax=ax2[1], dmin=3e-8, dmax=-3e-8, cmap='gray',title="raise lower of +30nm and highest of -30nm")
>>> topo.show(ax=ax2[2], pixels=True, title="Set axis value in pixels");
"""
mpl.rc('axes', grid=False)
if ax is None:
ax = plt.gca()
ax.src = self
if title == None:
title = u"{0} - {1}".format(self.type, self.channel)
if wrap is not None:
title = "\n".join([title[i*wrap:(i+1)*wrap]
for i in range(int(len(title)/wrap)+1)])
unit = self.size['real']['unit']
sunit = 'afpnum kMGTPE'
if len(unit) == 1 or unit in ['pixels']:
isunit = 6
elif unit[0] in sunit:
isunit = sunit.find(unit[0])
unit = unit[1:]
else:
isunit = 6
W = self.size['real']['x']
H = self.size['real']['y']
fact = int(np.floor(np.log(W)/np.log(10)/3))
isunit += fact
W, H = W/10**(fact*3), H/10**(fact*3)
if cmap == None:
cmap = 'gray'
if unit == 'm' and self.channel == "Topography":
cmap = 'hot'
mi, ma = np.nanmin(self.pixels), np.nanmax(self.pixels)
if adaptive:
img = np.asarray(256**2*(self.pixels-mi)/(ma-mi), dtype=np.uint16)
mi, ma = 0, 1
img = skimage.exposure.equalize_adapthist(img, clip_limit=0.03)
else:
img = mul*self.pixels
mi *= mul
ma *= mul
if sig == None:
vmin = mi+dmin
vmax = ma+dmax
else:
std = np.nanstd(img)
avg = np.nanmean(img)
vmin = avg - sig * std
vmax = avg + sig * std
if 'level' in kargs:
if kargs['level'] < 0 or kargs['level']>=50:
raise ValueError("The level shoud have a value in [0,50)")
vmax = np.percentile(img, 100-kargs['level'])
vmin = np.percentile(img, kargs['level'])
del kargs['level']
if 'vmin' in kargs:
vmin = kargs['vmin']
del kargs['vmin']
if 'vmax' in kargs:
vmax = kargs['vmax']
del kargs['vmax']
if symmetric:
vmax = abs(max(vmin,vmax))
vmin = -vmax
if not flip:
ax.flipped = False
if pixels:
ax.isPixel = True
r = ax.imshow(np.flipud(img), extent=[0,img.shape[1],img.shape[0],0], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.isPixel = False
r = ax.imshow(np.flipud(img), extent=[0, W, 0, H], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.flipped = True
if pixels:
ax.isPixel = True
r = ax.imshow(np.flipud(img), extent=[0,img.shape[1],img.shape[0],0], cmap=cmap, vmin=vmin, vmax=vmax, **kargs)
else:
ax.isPixel = False
r = ax.imshow(np.flipud(img), cmap=cmap, extent=[0, W, 0, H], vmin=vmin, vmax=vmax, **kargs)
if pixels:
ax.set_xlim((0, self.pixels.shape[1]))
if flip:
ax.set_ylim((0, self.pixels.shape[0]))
else:
ax.set_ylim((self.pixels.shape[0], 0))
else:
ax.set_xlim((0,W))
if flip:
ax.set_ylim((H,0))
else:
ax.set_ylim((0,H))
if not pixels:
if isunit != 6:
u = sunit[isunit]
if u == 'u':
u = '$\\mu$'
ax.set_xlabel(u'x [{0}{1}]'.format(u, unit))
ax.set_ylabel(u'y [{0}{1}]'.format(u, unit))
else:
ax.set_xlabel(u'x [{0}]'.format(unit))
ax.set_ylabel(u'y [{0}]'.format(unit))
if title != None:
ax.set_title(title)
return r
def real2px(self, x, y):
"""
Transform a real (x,y) value in pixels
Units should be the same as the one plotted by pySPM.SPM_image.show
"""
return self.real2pixels(x,y)
def real2pixels(self, x, y, float=False):
"""
Transform a real (x,y) value in pixels
Units should be the same as the one plotted by pySPM.SPM_image.show
"""
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
if not float:
px = np.digitize(x, np.linspace(0,self.size['real']['x']/(10**fact),self.pixels.shape[1]), right=True)
py = np.digitize(y, np.linspace(0,self.size['real']['y']/(10**fact),self.pixels.shape[0]), right=False)
else:
px = x*(self.pixels.shape[1]-1)/(self.size['real']['x']/(10**fact))
py = y*(self.pixels.shape[0]-1)/(self.size['real']['y']/(10**fact))
return px, py
def px2real(self, x, y):
"""
Transform a (x,y) value from pixels to real
Units are the same as the one plotted by pySPM.SPM_image.show
"""
W = self.size['real']['x']
fact = int(np.floor(np.log(W)/np.log(10)/3))*3
rx = x*self.size['real']['x']/(10**fact)/self.pixels.shape[1]
ry = (self.pixels.shape[0]-y)*self.size['real']['y']/(10**fact)/self.pixels.shape[0]
return rx, ry
def circular_profile(self, x0, y0, Ra=1, Rn=0, width=1, N=20, A=0, B=360,\
cmap='jet', axImg=None, axPolar=None, axProfile=None, plotProfileEvery=1,\
xtransf=lambda x: x*1e9, ytransf=lambda x:x*1e9,\
ToFcorr=False, fit=lambda x, *p: p[3]+p[2]*CDF(x, *p[:2]), p0=None, errors=False, bounds=(-np.inf, np.inf), fakefit=False, **kargs):
"""
Create radial profiles from point x0,y0 with length Ra (outer radius) and Rn (negative Radius).
Start from angle A° to angle B° with N profiles.
If you want to apply the ToF-correction, please set ToFcorr to the number of scans used to record the ToF-SIMS image.
Return the fitting uncertainty on sigma if errors is set to True
        The fitting function can be adjusted via `fit` and its initial parameters via `p0`, an array whose entries may be values or callables; each callable is called with the x-values and the profile (y-values) and should return the initial guess for that parameter.
"""
from matplotlib import colors, cm
# Create a colormap for each profile
CM = plt.get_cmap(cmap)
cNorm = colors.Normalize(vmin=0, vmax=N)
scalarMap = cm.ScalarMappable(norm=cNorm, cmap=CM)
res = []
cov = []
angles = []
assert A<B
for i, angle in enumerate(np.linspace(A, B, N)):
a = np.radians(angle)
angles.append(a)
l, p = self.get_profile(
x0-Rn*np.cos(a),
y0+Rn*np.sin(a),
x0+Ra*np.cos(a),
y0-Ra*np.sin(a),
ax=axImg, width=width, color=scalarMap.to_rgba(i), **kargs)
if width==0:
profile = p
else:
profile = np.mean(p, axis=1)
if ToFcorr:
profile = -np.log(1.001-profile/ToFcorr)
if p0 is None:
AC = np.mean(profile[:len(l)//2])
AE = np.mean(profile[len(l)//2:])
if AC<AE:
p0 = [l[len(l)//2], 5*(l[1]-l[0]), np.max(profile)-np.min(profile), np.min(profile) ]
else:
p0 = [l[len(l)//2], 5*(l[1]-l[0]), -np.max(profile)+np.min(profile), np.max(profile) ]
else:
for j,p in enumerate(p0):
if callable(p):
p0[j] = p(l,profile)
if kargs.get('debug',False):
print("calculate fit parameters are", p0)
if not fakefit:
p0, pcov = scipy.optimize.curve_fit(fit, l , profile, p0)
else:
pcov = np.zeros((len(p0),len(p0)))
res.append(p0)
cov.append([np.sqrt(abs(pcov[i,i])) for i in range(len(p0))])
if axProfile and i%plotProfileEvery == 0:
axProfile.plot(xtransf(l-p0[0]), profile, color=scalarMap.to_rgba(i), linestyle=':')
axProfile.plot(xtransf(l-p0[0]), fit(l,*p0), color=scalarMap.to_rgba(i))
# close loop
if A%360 == B%360:
angles.append(angles[0])
res.append(res[0])
cov.append(cov[0])
# Plot polar
angles = np.array(angles)
        res = np.array(res)
import pdb
import numpy as np
import os
import tensorflow as tf
import math
from .data_utils import minibatches, pad_sequences, get_chunks
from .general_utils import Progbar
from .base_model import BaseModel
class NERModel(BaseModel):
"""Specialized class of Model for NER"""
def __init__(self, config):
super(NERModel, self).__init__(config)
self.idx_to_tag = {idx: tag for tag, idx in
self.config.vocab_tags.items()}
self.tag_to_idx = {tag: idx for tag, idx in
self.config.vocab_tags.items()}
def add_placeholders(self):
"""Define placeholders = entries to computational graph"""
# shape = (batch size, max length of sentence in batch)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None],
name="word_ids")
# shape = (batch size)
self.sequence_lengths = tf.placeholder(tf.int32, shape=[None],
name="sequence_lengths")
# shape = (batch size, max length of sentence, max length of word)
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None],
name="char_ids")
# shape = (batch_size, max_length of sentence)
self.word_lengths = tf.placeholder(tf.int32, shape=[None, None],
name="word_lengths")
# shape = (batch size, max length of sentence in batch)
self.labels = tf.placeholder(tf.int32, shape=[None, None],
name="labels")
# hyper parameters
self.dropout = tf.placeholder(dtype=tf.float32, shape=[],
name="dropout")
self.lr = tf.placeholder(dtype=tf.float32, shape=[],
name="lr")
def get_feed_dict(self, words, labels=None, lr=None, dropout=None):
"""Given some data, pad it and build a feed dictionary
Args:
words: list of sentences. A sentence is a list of ids of a list of
words. A word is a list of ids
labels: list of ids
lr: (float) learning rate
dropout: (float) keep prob
Returns:
dict {placeholder: value}
"""
# perform padding of the given data
if self.config.use_chars:
char_ids, word_ids = zip(*words)
word_ids, sequence_lengths = pad_sequences(word_ids, 0)
char_ids, word_lengths = pad_sequences(char_ids, pad_tok=0,
nlevels=2)
else:
word_ids, sequence_lengths = pad_sequences(words, 0)
# build feed dictionary
feed = {
self.word_ids: word_ids,
self.sequence_lengths: sequence_lengths
}
if self.config.use_chars:
feed[self.char_ids] = char_ids
feed[self.word_lengths] = word_lengths
if labels is not None:
labels, _ = pad_sequences(labels, 0)
feed[self.labels] = labels
if lr is not None:
feed[self.lr] = lr
if dropout is not None:
feed[self.dropout] = dropout
return feed, sequence_lengths
def add_word_embeddings_op(self):
"""Defines self.word_embeddings
If self.config.embeddings is not None and is a np array initialized
        with pre-trained word vectors, the word embedding is just a look-up
and we don't train the vectors. Otherwise, a random matrix with
the correct shape is initialized.
"""
with tf.variable_scope("words"):
if self.config.embeddings is None:
self.logger.info("WARNING: randomly initializing word vectors")
_word_embeddings = tf.get_variable(
name="_word_embeddings",
dtype=tf.float32,
shape=[self.config.nwords, self.config.dim_word])
else:
_word_embeddings = tf.Variable(
self.config.embeddings,
name="_word_embeddings",
dtype=tf.float32,
trainable=self.config.train_embeddings)
word_embeddings = tf.nn.embedding_lookup(_word_embeddings,
self.word_ids, name="word_embeddings")
with tf.variable_scope("chars"):
if self.config.use_chars:
# get char embeddings matrix
_char_embeddings = tf.get_variable(
name="_char_embeddings",
dtype=tf.float32,
shape=[self.config.nchars, self.config.dim_char])
char_embeddings = tf.nn.embedding_lookup(_char_embeddings,
self.char_ids, name="char_embeddings")
# put the time dimension on axis=1
s = tf.shape(char_embeddings)
char_embeddings = tf.reshape(char_embeddings,
shape=[s[0]*s[1], s[-2], self.config.dim_char])
word_lengths = tf.reshape(self.word_lengths, shape=[s[0]*s[1]])
# bi lstm on chars
cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
_output = tf.nn.bidirectional_dynamic_rnn(
cell_fw, cell_bw, char_embeddings,
sequence_length=word_lengths, dtype=tf.float32)
# read and concat output
_, ((_, output_fw), (_, output_bw)) = _output
output = tf.concat([output_fw, output_bw], axis=-1)
# shape = (batch size, max sentence length, char hidden size)
output = tf.reshape(output,
shape=[s[0], s[1], 2*self.config.hidden_size_char])
word_embeddings = tf.concat([word_embeddings, output], axis=-1)
self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)
def add_logits_op(self):
"""Defines self.logits
For each word in each sentence of the batch, the network produces a
vector of scores whose dimension equals the number of tags.
"""
with tf.variable_scope("bi-lstm"):
cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)
cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_lstm)
(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw, cell_bw, self.word_embeddings,
sequence_length=self.sequence_lengths, dtype=tf.float32)
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.nn.dropout(output, self.dropout)
with tf.variable_scope("proj"):
W = tf.get_variable("W", dtype=tf.float32,
shape=[2*self.config.hidden_size_lstm, self.config.ntags])
b = tf.get_variable("b", shape=[self.config.ntags],
dtype=tf.float32, initializer=tf.zeros_initializer())
nsteps = tf.shape(output)[1]
output = tf.reshape(output, [-1, 2*self.config.hidden_size_lstm])
pred = tf.matmul(output, W) + b
self.logits = tf.reshape(pred, [-1, nsteps, self.config.ntags])
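# self.logits has shape (batch_size, max_sentence_length, ntags): one
# unnormalized score per tag for every word position.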
def add_pred_op(self):
"""Defines self.labels_pred
This op is defined only in the case where we don't use a CRF since in
that case we can make the prediction "in the graph" (thanks to tf
functions in other words). With the CRF, as the inference is coded
in python and not in pure tensorflow, we have to make the prediction
outside the graph.
"""
if not self.config.use_crf:
self.labels_pred = tf.cast(tf.argmax(self.logits, axis=-1),
tf.int32)
def add_loss_op(self):
"""Defines the loss"""
if self.config.use_crf:
log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(
self.logits, self.labels, self.sequence_lengths)
self.trans_params = trans_params # need to evaluate it for decoding
self.loss = tf.reduce_mean(-log_likelihood)
else:
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.logits, labels=self.labels)
mask = tf.sequence_mask(self.sequence_lengths)
losses = tf.boolean_mask(losses, mask)
self.loss = tf.reduce_mean(losses)
# for tensorboard
tf.summary.scalar("loss", self.loss)
def build(self):
# NER specific functions
self.add_placeholders()
self.add_word_embeddings_op()
self.add_logits_op()
self.add_pred_op()
self.add_loss_op()
# Generic functions that add training op and initialize session
self.add_train_op(self.config.lr_method, self.lr, self.loss,
self.config.clip)
self.initialize_session() # now self.sess is defined and vars are init
def predict_batch(self, words):
"""
Args:
words: list of sentences
Returns:
labels_pred: list of predicted label ids for each sentence
sequence_lengths: list of sentence lengths
scores: per-sentence uncertainty scores (None when the CRF is not used)
"""
fd, sequence_lengths = self.get_feed_dict(words, dropout=1.0)
if self.config.use_crf:
# get tag scores and transition params of CRF
viterbi_sequences = []
scores = []
logits, trans_params = self.sess.run(
[self.logits, self.trans_params], feed_dict=fd)
#logits = sigmoid_v(logits)
#trans_params = sigmoid_v(trans_params)
# iterate over the sentences because there is no batching in viterbi_decode
for logit, sequence_length in zip(logits, sequence_lengths):
logit = logit[:sequence_length] # keep only the valid steps
#print("Logit ", logit)
viterbi_seq, viterbi_score = tf.contrib.crf.viterbi_decode(
logit, trans_params)
viterbi_sequences += [viterbi_seq]
#print('trans_params ', trans_params)
#print('Sequence ', viterbi_seq)
#print(sequence_length)
#print(len(viterbi_seq))
#print('Score ', viterbi_score)#Use to decide least-uncertainty
if self.config.active_strategy=="nus":
viterbi_score = float(viterbi_score/sequence_length)
else:
viterbi_score = active_strategy(logit, trans_params, self.config.active_strategy, self.tag_to_idx)
scores.append(viterbi_score)
return viterbi_sequences, sequence_lengths, scores
else:
labels_pred = self.sess.run(self.labels_pred, feed_dict=fd)
return labels_pred, sequence_lengths, None
def run_epoch(self, train, dev, epoch):
"""Performs one complete pass over the train set and evaluate on dev
Args:
train: dataset that yields tuple of sentences, tags
dev: dataset
epoch: (int) index of the current epoch
Returns:
f1: (python float), score to select model on, higher is better
"""
# progbar stuff for logging
batch_size = self.config.batch_size
nbatches = (len(train) + batch_size - 1) // batch_size
#prog = Progbar(target=nbatches)
# iterate over dataset
for i, (words, labels) in enumerate(minibatches(train, batch_size)):
#print(words, labels)
fd, _ = self.get_feed_dict(words, labels, self.config.lr,
self.config.dropout)
_, train_loss, summary = self.sess.run(
[self.train_op, self.loss, self.merged], feed_dict=fd)
#prog.update(i + 1, [("train loss", train_loss)])
# tensorboard
if i % 10 == 0:
self.file_writer.add_summary(summary, epoch*nbatches + i)
metrics = self.run_evaluate(dev)
msg = "Accuracy " + str(metrics["acc"]) + " - F1 " + str(metrics["f1"])
#msg = " - ".join(["{} {:04.2f}".format(k, v)
# for k, v in metrics.items()])
print(msg)
self.logger.info(msg)
return metrics["f1"]
def run_evaluate(self, test, mode="train"):
"""Evaluates performance on test set
Args:
test: dataset that yields tuple of (sentences, tags)
Returns:
metrics: (dict) metrics["acc"] = 98.4, ...
"""
accs = []
l = []
#correct_preds_ne, total_correct_ne, total_preds_ne = 0.,0.,0.
s= ""
correct_preds, total_correct, total_preds = 0., 0., 0.
for words, labels in minibatches(test, self.config.batch_size):
#print(words,labels)
labels_pred, sequence_lengths, prob = self.predict_batch(words)
#pdb.set_trace()
#l.append((list(words),prob)) #list of words, list of scores corresponding
#l += prob
#print('labels_pred ', labels_pred)
if 'test' in mode:
for lab, pred in zip(labels, labels_pred):
#print('lab',lab)
#print('pred',pred)
for i,j in zip(lab,pred):
s+=self.idx_to_tag[i] + '\t' + self.idx_to_tag[j] + '\n'
s+='\n'
for lab, lab_pred, length in zip(labels, labels_pred,
sequence_lengths):
lab = lab[:length]
lab_pred = lab_pred[:length]
accs += [a==b for (a, b) in zip(lab, lab_pred)]
lab_chunks = set(get_chunks(lab, self.config.vocab_tags))
lab_pred_chunks = set(get_chunks(lab_pred,
self.config.vocab_tags))
correct_preds += len(lab_chunks & lab_pred_chunks)
total_preds += len(lab_pred_chunks)
total_correct += len(lab_chunks)
#print("Total Preds ", total_preds)
#print("Total correct ", total_correct)
#print("Correct preds ", correct_preds)
p = correct_preds / total_preds if correct_preds > 0 else 0
r = correct_preds / total_correct if correct_preds > 0 else 0
f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0
acc = np.mean(accs)
if "test" in mode:
f = open(self.config.file_out + "_" + mode,'w')
f.write(s)
f.close()
#Sort l to get most/least uncertain
#l2 = sorted(l)
#mu = []
#lu = []
#for i in range(0,self.config.num_query):
# mu.append(l.index(l2[i]))
# lu.append(l.index(l2[len(l2)-i-1]))
#l = sorted(l, key=lambda pr: pr[2])
#pdb.set_trace()
#print("l",l)
#return acc, f1, list of most uncertainty and list of least uncertainty examples
#return {"acc": 100*acc, "f1": 100*f1, "out":l}
return {"acc": 100*acc, "f1": 100*f1}
#return {"acc": 100*acc, "f1": 100*f1, "mu": l[0:self.config.num_query], "lu": l[len(l)-self.config.num_query: len(l)]}
def predict(self, words_raw):
"""Returns list of tags
Args:
words_raw: list of words (string), just one sentence (no batch)
Returns:
preds: list of tags (string), one for each word in the sentence
"""
#words = [self.config.processing_word(w) for w in words_raw] #this is used for word raw
#print(words)
words = words_raw
words_o = list(words)
#print(words_o)
if type(words[0]) == tuple:
words = zip(*words)
#print(words)
pred_ids, _, scores = self.predict_batch([words])
#print("Prediction: ")
#print(pred_ids, _, scores)
preds = [self.idx_to_tag[idx] for idx in list(pred_ids[0])]
return (words_o, scores)
#return preds
def active_strategy(score, transition_params, active_strategy, tag_to_idx):
"""
Args: output of CRF
score: A [seq_len, num_tags] matrix of unary potentials.
transition_params: A [num_tags, num_tags] matrix of binary potentials.
"""
if active_strategy=="cluster":
return score
trellis = np.zeros_like(score)
backpointers = np.zeros_like(score, dtype=np.int32)
trellis[0] = score[0]
for t in range(1, score.shape[0]):
v = np.expand_dims(trellis[t - 1], 1) + transition_params
trellis[t] = score[t] + np.max(v, 0)
backpointers[t] = np.argmax(v, 0)
viterbi = [np.argmax(trellis[-1])]
for bp in reversed(backpointers[1:]):
viterbi.append(bp[viterbi[-1]])
viterbi.reverse()
score_final = np.max(trellis[-1]) #Score of sequences (higher = better)
if (active_strategy=='mg'):
top_scores = trellis[-1][np.argsort(trellis[-1])
import numpy as np
from .qnumber import is_qsparse
__all__ = ['retained_bond_indices', 'split_matrix_svd', 'qr']
def retained_bond_indices(s, tol):
"""
Indices of retained singular values based on given tolerance.
"""
w = np.linalg.norm(s)
if w == 0:
return np.array([], dtype=int)
# normalized squares
s = (s / w)**2
# accumulate values from smallest to largest
sort_idx = np.argsort(s)
s[sort_idx] = np.cumsum(s[sort_idx])
return np.where(s > tol)[0]
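# Minimal usage sketch (illustrative, not part of the original module): for the
# spectrum [1.0, 0.5, 1e-8] and tol=1e-10, the accumulated weight of the
# smallest value stays below the tolerance, so only indices 0 and 1 survive.
def _example_retained_bond_indices():
    s = np.array([1.0, 0.5, 1e-8])
    # returns array([0, 1]); index 2 is truncated
    return retained_bond_indices(s, tol=1e-10)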
def split_matrix_svd(A, q0, q1, tol):
"""
Split a matrix by singular value decomposition,
taking block sparsity structure dictated by quantum numbers into account,
and truncate small singular values based on tolerance.
"""
assert A.ndim == 2
assert len(q0) == A.shape[0]
assert len(q1) == A.shape[1]
assert is_qsparse(A, [q0, -q1])
# find common quantum numbers
qis = np.intersect1d(q0, q1)
if len(qis) == 0:
assert np.linalg.norm(A) == 0
# special case: no common quantum numbers;
# use dummy intermediate dimension 1
u = np.zeros((A.shape[0], 1), dtype=A.dtype)
v = np.zeros((1, A.shape[1]), dtype=A.dtype)
s = np.zeros(1)
# single column of 'u' should have norm 1
if A.shape[0] > 0:
u[0, 0] = 1
# ensure non-zero entry in 'u' formally matches quantum numbers
q = q0[:1]
# 'v' must remain zero matrix to satisfy quantum number constraints
return (u, s, v, q)
# require NumPy arrays for indexing
q0 = np.array(q0)
q1 = np.array(q1)
# sort quantum numbers and arrange entries in A accordingly;
# using mergesort to avoid permutations of identical quantum numbers
idx0 = np.argsort(q0, kind='mergesort')
idx1 = np.argsort(q1, kind='mergesort')
if np.any(idx0 - np.arange(len(idx0))):
# if not sorted yet...
q0 = q0[idx0]
A = A[idx0, :]
if np.any(idx1 - np.arange(len(idx1))):
# if not sorted yet...
q1 = q1[idx1]
A = A[:, idx1]
# maximum intermediate dimension
max_interm_dim = min(A.shape)
# keep track of intermediate dimension
D = 0
# allocate memory for U and V matrices, singular values and
# corresponding intermediate quantum numbers
u = np.zeros((A.shape[0], max_interm_dim), dtype=A.dtype)
v = np.zeros((max_interm_dim, A.shape[1]), dtype=A.dtype)
s = np.zeros(max_interm_dim)
q = np.zeros(max_interm_dim, dtype=q0.dtype)
# for each shared quantum number...
for qn in qis:
# indices of current quantum number
iqn = np.where(q0 == qn)[0]; i0 = iqn[0]; i1 = iqn[-1] + 1
iqn = np.where(q1 == qn)[0]; j0 = iqn[0]; j1 = iqn[-1] + 1
# perform SVD decomposition of current block
usub, ssub, vsub = np.linalg.svd(A[i0:i1, j0:j1], full_matrices=False)
# update intermediate dimension
Dprev = D
D += len(ssub)
u[i0:i1, Dprev:D] = usub
v[Dprev:D, j0:j1] = vsub
s[Dprev:D] = ssub
q[Dprev:D] = qn
assert D <= max_interm_dim
# use actual intermediate dimensions
u = u[:, :D]
v = v[:D, :]
s = s[:D]
q = q[:D]
# truncate small singular values
idx = retained_bond_indices(s, tol)
u = u[:, idx]
v = v[idx, :]
s = s[idx]
q = q[idx]
# undo sorting of quantum numbers
if np.any(idx0 - np.arange(len(idx0))):
u = u[np.argsort(idx0), :]
if np.any(idx1 - np.arange(len(idx1))):
v = v[:, np.argsort(idx1)]
return (u, s, v, q)
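# Usage sketch (illustrative, not part of the original module): a block-diagonal
# matrix whose rows and columns carry quantum numbers [0, 1] is factorized block
# by block; with tol=0 no singular values are discarded and u @ diag(s) @ v
# reconstructs A.
def _example_split_matrix_svd():
    A = np.array([[2., 0.],
                  [0., 3.]])
    q0 = np.array([0, 1])
    q1 = np.array([0, 1])
    u, s, v, q = split_matrix_svd(A, q0, q1, tol=0.)
    # q attaches a quantum number to each retained singular value
    return u, s, v, q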
def qr(A, q0, q1):
"""
Compute the block-wise QR decompositions of a matrix, taking block sparsity
structure dictated by quantum numbers into account (that is, `A[i, j]` can
only be non-zero if `q0[i] == q1[j]`).
The resulting R matrix is not necessarily upper triangular due to
reordering of entries.
"""
assert A.ndim == 2
assert len(q0) == A.shape[0]
assert len(q1) == A.shape[1]
assert is_qsparse(A, [q0, -q1])
# find common quantum numbers
qis = np.intersect1d(q0, q1)
if len(qis) == 0:
assert np.linalg.norm(A) == 0
# special case: no common quantum numbers;
# use dummy intermediate dimension 1 with all entries in 'R' set to zero
Q = np.zeros((A.shape[0], 1), dtype=A.dtype)
R = np.zeros((1, A.shape[1]), dtype=A.dtype)
# single column of 'Q' should have norm 1
Q[0, 0] = 1
# ensure non-zero entry in 'Q' formally matches quantum numbers
qinterm = q0[:1]
return (Q, R, qinterm)
# require NumPy arrays for indexing
q0 = np.array(q0)
q1 = np.array(q1)
# sort quantum numbers and arrange entries in A accordingly;
# using mergesort to avoid permutations of identical quantum numbers
idx0 = np.argsort(q0, kind='mergesort')
idx1 = np.argsort(q1, kind='mergesort')
if np.any(idx0 - np.arange(len(idx0))):
# if not sorted yet...
q0 = q0[idx0]
A = A[idx0, :]
if np.any(idx1 - np.arange(len(idx1))):
# if not sorted yet...
q1 = q1[idx1]
A = A[:, idx1]
# maximum intermediate dimension
max_interm_dim = min(A.shape)
# keep track of intermediate dimension
D = 0
Q = np.zeros((A.shape[0], max_interm_dim), dtype=A.dtype)
R = np.zeros((max_interm_dim, A.shape[1]), dtype=A.dtype)
# corresponding intermediate quantum numbers
qinterm = np.zeros(max_interm_dim, dtype=q0.dtype)
# for each shared quantum number...
for qn in qis:
# indices of current quantum number
iqn = np.where(q0 == qn)[0]; i0 = iqn[0]; i1 = iqn[-1] + 1
iqn = np.where(q1 == qn)[0]; j0 = iqn[0]; j1 = iqn[-1] + 1
# perform QR decomposition of current block
Qsub, Rsub = np.linalg.qr(A[i0:i1, j0:j1], mode='reduced')
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''PP with numeric integration. See also pyscf/pbc/gto/pseudo/pp_int.py
For GTH/HGH PPs, see:
Goedecker, Teter, Hutter, PRB 54, 1703 (1996)
Hartwigsen, Goedecker, and Hutter, PRB 58, 3641 (1998)
'''
import numpy as np
import scipy.linalg
import scipy.special
from pyscf import lib
from pyscf.gto import mole
from pyscf.pbc.gto.pseudo import pp_int
def get_alphas(cell):
'''alpha parameters from the non-divergent Hartree+Vloc G=0 term.
See ewald.pdf
Returns:
alphas : (natm,) ndarray
'''
return get_alphas_gth(cell)
def get_alphas_gth(cell):
'''alpha parameters for the local GTH pseudopotential.'''
G0 = np.zeros((1,3))
return -get_gth_vlocG(cell, G0)
def get_vlocG(cell, Gv=None):
'''Local PP kernel in G space: Vloc(G)
Returns:
(natm, ngrids) ndarray
'''
if Gv is None: Gv = cell.Gv
vlocG = get_gth_vlocG(cell, Gv)
return vlocG
def get_gth_vlocG(cell, Gv):
'''Local part of the GTH pseudopotential.
See MH (4.79).
Args:
Gv : (ngrids,3) ndarray
Returns:
(natm, ngrids) ndarray
'''
vlocG = pp_int.get_gth_vlocG_part1(cell, Gv)
# Add the C1, C2, C3, C4 contributions
G2 = np.einsum('ix,ix->i', Gv, Gv)
for ia in range(cell.natm):
symb = cell.atom_symbol(ia)
if symb not in cell._pseudo:
continue
pp = cell._pseudo[symb]
rloc, nexp, cexp = pp[1:3+1]
G2_red = G2 * rloc**2
cfacs = 0
if nexp >= 1:
cfacs += cexp[0]
if nexp >= 2:
cfacs += cexp[1] * (3 - G2_red)
if nexp >= 3:
cfacs += cexp[2] * (15 - 10*G2_red + G2_red**2)
if nexp >= 4:
cfacs += cexp[3] * (105 - 105*G2_red + 21*G2_red**2 - G2_red**3)
vlocG[ia,:] -= (2*np.pi)**(3/2.)*rloc**3*np.exp(-0.5*G2_red) * cfacs
return vlocG
def get_projG(cell, kpt=np.zeros(3)):
'''PP weight and projector for the nonlocal PP in G space.
Returns:
hs : list( list( np.array( , ) ) )
- hs[atm][l][i,j]
projs : list( list( list( list( np.array(ngrids) ) ) ) )
- projs[atm][l][m][i][ngrids]
'''
return get_gth_projG(cell, kpt+cell.Gv)
def get_gth_projG(cell, Gvs):
r'''G space projectors from the FT of the real-space projectors.
\int e^{iGr} p_j^l(r) Y_{lm}^*(theta,phi)
= i^l p_j^l(G) Y_{lm}^*(thetaG, phiG)
See MH Eq.(4.80)
'''
Gs,thetas,phis = cart2polar(Gvs)
hs = []
projs = []
for ia in range(cell.natm):
symb = cell.atom_symbol(ia)
pp = cell._pseudo[symb]
nproj_types = pp[4]
h_ia = []
proj_ia = []
for l,proj in enumerate(pp[5:]):
rl, nl, hl = proj
h_ia.append( np.array(hl) )
proj_ia_l = []
for m in range(-l,l+1):
projG_ang = Ylm(l,m,thetas,phis).conj()
proj_ia_lm = []
for i in range(nl):
projG_radial = projG_li(Gs,l,i,rl)
proj_ia_lm.append( (1j)**l * projG_radial*projG_ang )
proj_ia_l.append(proj_ia_lm)
proj_ia.append(proj_ia_l)
hs.append(h_ia)
projs.append(proj_ia)
return hs, projs
def projG_li(G, l, i, rl):
G = np.array(G)
G_red = G*rl
# MH Eq. (4.81)
return ( _qli(G_red,l,i) * np.pi**(5/4.) * G**l * np.sqrt(rl**(2*l+3))
/ np.exp(0.5*G_red**2) )
def _qli(x,l,i):
# MH Eqs. (4.82)-(4.93) :: beware typos!
# Mathematica formulas:
# p[l_, i_, r_] = Sqrt[2] r^(l + 2 (i - 1)) Exp[-r^2/(2 R^2)]/(R^(l + (4 i - 1)/2) Sqrt[Gamma[l + (4 i - 1)/2]])
# pG[l_, i_, G_] = Integrate[p[l, i, r] 4 Pi r^2 SphericalBesselJ[l, G r], {r, 0, Infinity}]
# qG[l_, i_, G_] := pG[l, i, G]/(Pi^(5/4) G^l Sqrt[R^(2 l + 3)]/Exp[(G R)^2/2])
# FullSimplify[qG[4, 3, G], R > 0 && G > 0]
sqrt = np.sqrt
if l==0 and i==0:
return 4*sqrt(2.)
elif l==0 and i==1:
return 8*sqrt(2/15.)*(3-x**2) # MH & GTH (right)
#return sqrt(8*2/15.)*(3-x**2) # HGH (wrong)
elif l==0 and i==2:
#return 16/3.*sqrt(2/105.)*(15-20*x**2+4*x**4) # MH (wrong)
return 16/3.*sqrt(2/105.)*(15-10*x**2+x**4) # HGH (right)
elif l==1 and i==0:
return 8*sqrt(1/3.)
elif l==1 and i==1:
return 16*sqrt(1/105.)*(5-x**2)
elif l==1 and i==2:
#return 32/3.*sqrt(1/1155.)*(35-28*x**2+4*x**4) # MH (wrong)
return 32/3.*sqrt(1/1155.)*(35-14*x**2+x**4) # HGH (right)
elif l==2 and i==0:
return 8*sqrt(2/15.)
elif l==2 and i==1:
return 16/3.*sqrt(2/105.)*(7-x**2)
elif l==2 and i==2:
#return 32/3.*sqrt(2/15015.)*(63-36*x**2+4*x**4) # MH (wrong I think)
return 32/3.*sqrt(2/15015.)*(63-18*x**2+x**4) # TCB
elif l==3 and i==0:
return 16*sqrt(1/105.)
elif l==3 and i==1:
return 32/3.*sqrt(1/1155.)*(9-x**2)
elif l==3 and i==2:
return 64/45.*sqrt(1/1001.)*(99-22*x**2+x**4)
elif l==4 and i==0:
return 16/3.*sqrt(2/105.)
elif l==4 and i==1:
return 32/3.*sqrt(2/15015.)*(11-x**2)
elif l==4 and i==2:
return 64/45.*sqrt(2/17017.)*(143-26*x**2+x**4)
else:
print("*** WARNING *** l =", l, ", i =", i, "not yet implemented for NL PP!")
return 0.
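# Small numeric checks (illustrative, not part of the original module): the
# l=0, i=0 form is the constant 4*sqrt(2), the l=0, i=1 form vanishes at
# x = sqrt(3), and at G = 0 the l=0, i=0 radial projector reduces to
# 4*sqrt(2) * pi**(5/4) * rl**(3/2).
def _example_projector_checks(rl=1.0):
    assert abs(_qli(0.7, 0, 0) - 4*np.sqrt(2.)) < 1e-12
    assert abs(_qli(np.sqrt(3.), 0, 1)) < 1e-12
    expected = 4*np.sqrt(2.) * np.pi**(5/4.) * np.sqrt(rl**3)
    assert abs(projG_li(0.0, 0, 0, rl) - expected) < 1e-12
    return True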
def Ylm_real(l,m,theta,phi):
'''Real spherical harmonics, if desired.'''
Ylabsm = Ylm(l,np.abs(m),theta,phi)
if m < 0:
return np.sqrt(2.) * Ylabsm.imag
elif m > 0:
return np.sqrt(2.) * Ylabsm.real
else: # m == 0
return Ylabsm.real
def Ylm(l,m,theta,phi):
'''
Spherical harmonics; returns a complex number
Note the "convention" for theta and phi:
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.special.sph_harm.html
'''
#return scipy.special.sph_harm(m=m,n=l,theta=phi,phi=theta)
return scipy.special.sph_harm(m,l,phi,theta)
def cart2polar(rvec):
# The rows of rvec are the 3-component vectors
# i.e. rvec is N x 3
x,y,z = rvec.T
r = lib.norm(rvec, axis=1)
# theta is the polar angle, 0 < theta < pi
# catch possible 0/0
theta = np.arccos(z/(r+1e-200))
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
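# Usage sketch (illustrative, not part of the generated data below): once the
# dictionary defined next is populated, a space group can be looked up by
# number or Hermann-Mauguin symbol and queried for equivalent reflections.
def _example_symmetry_equivalents():
    sg = space_groups['P -1']
    hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
    # P -1 contains the identity and the inversion, so hkls holds (1, 2, 3)
    # and (-1, -2, -3), both with unit phase factors
    return hkls, phases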
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
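# Illustration only (not part of the generated tables in this module): a
# minimal sketch of how one of the (rot, trans_num, trans_den) triples built
# above could be applied to a fractional coordinate. `apply_symmetry_op` is a
# hypothetical helper introduced here for demonstration; it only assumes that
# `N`, the array module used throughout this file, provides `dot`.
def apply_symmetry_op(op, frac_pos):
    """Apply one stored symmetry operation to a fractional coordinate triple."""
    rot, trans_num, trans_den = op
    # x' = R.x + t, with the translation given as an element-wise fraction;
    # the 1.0 factor forces float division for integer translation arrays.
    return N.dot(rot, frac_pos) + (1.0 * trans_num) / trans_den
# Example (using the I m m 2 list just assembled above):
#     apply_symmetry_op(transformations[4], N.array([0.1, 0.2, 0.3]))
# would return the body-centred image of the point (0.1, 0.2, 0.3).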
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
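# Note: because the centring shifts are simply added to the base translations,
# some stored fractions fall outside [0, 1) -- for example (1, 1/2, 1/2) in the
# C m c a block above, or negative components in the inverse operations. If a
# canonical form is needed when comparing operations, the translation can be
# folded back into the unit cell, e.g. ((1.0 * trans_num) / trans_den) % 1.0;
# that is a post-processing step suggested here, not something this module does.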
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
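# Space group 70: F d d d :2 (origin choice 2)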
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
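# Space group 71: I m m m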
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
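# Space group 72: I b a m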
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
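# Space group 73: I b c a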
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
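# Space group 74: I m m a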
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
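# Space group 75: P 4 (first of the tetragonal space groups)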
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
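# Illustrative sketch, not part of the generated tables: each (rot, trans_num,
# trans_den) tuple stored in a transformations list encodes the fractional-
# coordinate mapping x' = rot . x + trans_num/trans_den. The helper name below
# is hypothetical (it is not part of this module's API) and assumes that N is
# the array module already imported and used throughout this file.
def _apply_symmetry_operation(rot, trans_num, trans_den, point):
    # Rotate the fractional position, then add the fractional translation;
    # the 1.* factor forces float division of the integer numerator/denominator.
    return N.dot(rot, point) + 1.*trans_num/trans_den
# Example use (also hypothetical):
#   rot, num, den = transformations[1]
#   _apply_symmetry_operation(rot, num, den, N.array([0.1, 0.2, 0.3]))
# Space group 76: P 41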
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
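# Space group 77: P 42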
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
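# Space group 78: P 43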
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
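# Space group 79: I 4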
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
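# Space group 80: I 41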
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
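# Space group 81: P -4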
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
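# Space group 82: I -4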
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
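# Space group 83: P 4/m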
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
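# Space group 84: P 42/m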
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
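# Space group 85: P 4/n :2 (origin choice 2)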
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
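# Space group 86: P 42/n :2 (origin choice 2)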
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
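# Space group 87: I 4/m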
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
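# Space group 88: I 41/a :2 (origin choice 2)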
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
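# Space group 89: P 4 2 2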
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
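# Space group 90: P 4 21 2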
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
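# Space group 91: P 41 2 2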
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
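# Space group 92: P 41 21 2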
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
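# Space group 93: P 42 2 2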
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
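# Space group 94: P 42 21 2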
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
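# Space group 95: P 43 2 2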
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
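# Space group 96: P 43 21 2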
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
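# Space group 97: I 4 2 2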
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
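# Space group 98: I 41 2 2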
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
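# Sketch of a consistency check one might run over the tables above (purely
# illustrative; the function name is hypothetical). Every rotation part of a
# crystallographic operation is an integer matrix with determinant +1
# (proper) or -1 (improper), and every translation denominator is non-zero.
def _check_operations(sg_transformations):
for rot, trans_num, trans_den in sg_transformations:
det = (rot[0][0] * (rot[1][1] * rot[2][2] - rot[1][2] * rot[2][1])
- rot[0][1] * (rot[1][0] * rot[2][2] - rot[1][2] * rot[2][0])
+ rot[0][2] * (rot[1][0] * rot[2][1] - rot[1][1] * rot[2][0]))
assert det in (1, -1)
assert 0 not in list(trans_den)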
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
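# Note (illustrative): the space_groups dictionary built in this file is keyed
# both by International Tables number and by Hermann-Mauguin symbol, so
# space_groups[122] and space_groups['I -4 2 d'] refer to the same SpaceGroup
# object. A hypothetical lookup helper could therefore accept either form:
def _lookup_space_group(key):
# key: either an int (IT number) or a str (Hermann-Mauguin symbol)
try:
return space_groups[key]
except KeyError:
raise KeyError('unknown space group: %r' % (key,))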
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
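# Illustrative sketch (names hypothetical): the orbit of a fractional position
# under one of these groups is obtained by applying every stored operation and
# wrapping the result back into the unit cell; images that differ only by a
# whole lattice translation are collapsed after rounding.
def _orbit(transformations, point):
seen = set()
positions = []
for rot, trans_num, trans_den in transformations:
image = N.dot(rot, point) + trans_num * 1.0 / trans_den
image = image - N.floor(image)            # wrap into [0, 1)
key = tuple([round(float(c), 6) for c in image])
if key not in seen:
seen.add(key)
positions.append(image)
return positions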
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
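# Illustrative sketch of how the tuples collected above can be used: each
# appended entry is a (rotation, translation numerator, translation
# denominator) triple, so the image of a point in fractional coordinates
# under one operator is rot*point + trans_num/trans_den. The helper name
# below is hypothetical, and it assumes that N is the numeric array module
# imported at the top of this module.
def _example_apply_symop(transformation, point):
    rot, trans_num, trans_den = transformation
    # Force floating-point division of the two integer translation vectors.
    translation = (1.0 * trans_num) / trans_den
    return N.dot(rot, point) + translation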
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
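# Minimal usage sketch: every group built in this module is registered twice
# in space_groups, once under its international number and once under its
# Hermann-Mauguin symbol, so either key retrieves the same object. The
# attribute name 'transformations' below is an assumption about the
# SpaceGroup class defined elsewhere in this module; the loop only restates
# how the (rot, trans_num, trans_den) tuples above are meant to be combined.
#
#     sg = space_groups['I 41/a c d :2']   # same object as space_groups[142]
#     point = N.array([0.1, 0.2, 0.3])
#     images = [N.dot(rot, point) + (1.0 * num) / den
#               for rot, num, den in sg.transformations]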
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
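# --- Hexagonal space groups (Nos. 168-194) ---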
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
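# Groups 169-173 (P 61, P 65, P 62, P 64, P 63) are the screw-axis variants
# of P 6: the rotation matrices are identical, only the fractional c-axis
# translations attached to them differ.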
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
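# Space group 177: P 6 2 2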
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
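# Space group 178: P 61 2 2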
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
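# Space group 179: P 65 2 2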
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
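# Space group 180: P 62 2 2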
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
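# Space group 181: P 64 2 2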
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
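# Space group 182: P 63 2 2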
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
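# Space group 183: P 6 m m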
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
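# Space group 184: P 6 c c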
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
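# Space group 185: P 63 c m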
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
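# Space group 186: P 63 m c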
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
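# Space group 187: P -6 m 2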
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
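# Space group 188: P -6 c 2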
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
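# Space group 189: P -6 2 m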
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
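# Space group 190: P -6 2 c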
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
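# Space group 191: P 6/m m m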
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
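# Space group 192: P 6/m c c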
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
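# Space group 193: P 63/m c m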
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
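# Space group 194: P 63/m m c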
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
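# Space group 195: P 2 3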
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
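# Space group 196: F 2 3 (face-centred: operations repeated with centring
# translations (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0))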
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
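# Space group 197, 'I 2 3': the 12 rotational operations listed with the
# (0,0,0) translation are repeated below with the I-centring shift (1/2,1/2,1/2).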
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
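# Illustrative sketch (an assumption, not part of the generated tables): each
# (rot, trans_num, trans_den) triple appears to act on a fractional coordinate
# x as  x' = rot . x + trans_num / trans_den.  The helper name below is
# hypothetical, unused elsewhere in this module, and only shows one way the
# stored triples could be applied.
def _apply_symmetry_operation(transformation, fractional_xyz):
    """Apply one (rot, trans_num, trans_den) triple to fractional coordinates."""
    import numpy
    rot, trans_num, trans_den = transformation
    rotation = numpy.asarray(rot, dtype=float)
    shift = numpy.asarray(trans_num, dtype=float) / numpy.asarray(trans_den, dtype=float)
    xyz = numpy.asarray(fractional_xyz, dtype=float)
    # Rotate (matrix . column vector), translate, and wrap into the unit cell.
    return (numpy.dot(rotation, xyz) + shift) % 1.0
# For example, _apply_symmetry_operation(transformations[1], (0.1, 0.2, 0.3))
# would map (0.1, 0.2, 0.3) to (0.3, 0.1, 0.2) using the second operation of
# 'I 2 3' defined above.

# Space group 198, 'P 21 3': 12 symmetry operations, each stored as a
# (rotation matrix, translation numerator, translation denominator) triple.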
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
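# Space group 199, 'I 21 3': the 12 operations of the primitive setting are
# repeated with the I-centring shift (1/2,1/2,1/2) added to each translation.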
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
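# Space group 200, 'P m -3': 24 operations (the 12 proper rotations and their
# inversion-related counterparts), all with zero translation.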
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
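# Space group 201, 'P n -3 :2' (origin choice 2): operations stored as
# (rotation, translation numerator, translation denominator) triples; some of
# the improper operations carry negative translation numerators.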
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
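# Space group 202, 'F m -3': the 24 operations of the primitive setting are
# repeated for each F-centring translation (0,1/2,1/2), (1/2,0,1/2), (1/2,1/2,0).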
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
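# Space group 203, 'F d -3 :2' (origin choice 2): F-centred; many operations
# carry quarter-cell translations (denominators of 4).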
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
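# Space group 204, 'I m -3': the 24 operations with zero translation are
# repeated with the I-centring shift (1/2,1/2,1/2).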
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
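# Space group 205 ('P a -3'): operations stored as (rotation matrix, translation numerator, translation denominator) triples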
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
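# Space group 206 ('I a -3')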
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
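# The same operations with the body-centering translation (1/2, 1/2, 1/2) added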
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
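# Space group 207 ('P 4 3 2')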
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
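# Space group 208 ('P 42 3 2')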
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
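# Space group 209 ('F 4 3 2')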
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
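# The same rotations with the face-centering translation (0, 1/2, 1/2)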
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
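# The same rotations with the face-centering translation (1/2, 0, 1/2)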
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
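# The same rotations with the face-centering translation (1/2, 1/2, 0)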
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
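# Space group 210 ('F 41 3 2')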
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
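# The same operations shifted by the face-centering translation (0, 1/2, 1/2)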
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
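# The same operations shifted by the face-centering translation (1/2, 0, 1/2)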
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
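# The same operations shifted by the face-centering translation (1/2, 1/2, 0)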
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
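# Symmetry operations for space group 212 (P 43 3 2).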
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
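# Symmetry operations for space group 213 (P 41 3 2).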
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
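# Symmetry operations for space group 214 (I 41 3 2).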
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(214, 'I 41 3 2', transformations)
space_groups[214] = sg
space_groups['I 41 3 2'] = sg
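# Symmetry operations for space group 215 (P -4 3 m).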
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(215, 'P -4 3 m', transformations)
space_groups[215] = sg
space_groups['P -4 3 m'] = sg
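# Symmetry operations for space group 216 (F -4 3 m).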
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(216, 'F -4 3 m', transformations)
space_groups[216] = sg
space_groups['F -4 3 m'] = sg
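# Symmetry operations for space group 217 (I -4 3 m).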
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
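# Symmetry operations for space group 218 (P -4 3 n).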
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(218, 'P -4 3 n', transformations)
space_groups[218] = sg
space_groups['P -4 3 n'] = sg
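# Symmetry operations for the following space-group entry (its SpaceGroup
# registration appears further below in the file).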
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(219, 'F -4 3 c', transformations)
space_groups[219] = sg
space_groups['F -4 3 c'] = sg
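# operators for space group 220, 'I -4 3 d', built below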
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(220, 'I -4 3 d', transformations)
space_groups[220] = sg
space_groups['I -4 3 d'] = sg
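# operators for space group 221, 'P m -3 m', built below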
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(221, 'P m -3 m', transformations)
space_groups[221] = sg
space_groups['P m -3 m'] = sg
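# operators for space group 222, 'P n -3 n :2', built below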
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(222, 'P n -3 n :2', transformations)
space_groups[222] = sg
space_groups['P n -3 n :2'] = sg
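# operators for space group 223, 'P m -3 n', built below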
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(223, 'P m -3 n', transformations)
space_groups[223] = sg
space_groups['P m -3 n'] = sg
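# operators for space group 224, 'P n -3 m :2', built below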
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(224, 'P n -3 m :2', transformations)
space_groups[224] = sg
space_groups['P n -3 m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = | N.array([0,0,0]) | numpy.array |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
This is where everything is put together. It receives all the user-provided
specs from 'main.py' as well as the data generated by 'data_preparation.py' (or
by the user if working with real experimental data), and performs the PAHMC
algorithm for state and parameter estimation. The dynamics model is defined in
'cuda_lib_dynamics.py' or in 'def_dynamics.py', other necessary CUDA kernels
are defined in 'cuda_utilities.py', and the gradient descent functionality is
implemented in 'gd.py'.
"""
from pathlib import Path
import time
from numba import cuda, jit
import numpy as np
from pahmc_ode_gpu.gd import descend
from pahmc_ode_gpu.cuda_utilities import k__action, k__diff, k__dAdX, \
k__dAdpar, k__leapfrog_X, k__leapfrog_par, k__linearop2d, k__linearop1d, \
k__zeros1d
def anneal(k__field, k__jacobian, k__dfield_dpar, stimuli, Y,
D, M, obsdim, dt, Rf0, alpha, betamax,
n_iter, epsilon, S, mass, scaling,
soft_dynrange, par_start, name, tune_beta, Rm=1.0, burn=0.5):
"""
    This is the master function that ties the whole PAHMC algorithm together.
Inputs
------
k__field: CUDA kernel.
k__jacobian: CUDA kernel.
k__dfield_dpar: CUDA kernel.
stimuli: D-by-M numpy array of floats.
Y: len(obsdim)-by-M numpy array of floats.
D: integer.
M: integer.
obsdim: 1d (shapeless) numpy array of integers.
dt: float.
Rf0: 1d (shapeless) numpy array of floats, with length D.
alpha: float.
betamax: integer.
n_iter: 1d (shapeless) numpy array of integers, with length
betamax.
epsilon: 1d (shapeless) numpy array of floats, with length betamax.
S: 1d (shapeless) numpy array of integers, with length
betamax.
mass: betamax-by-3 numpy array of floats.
scaling: 1d (shapeless) numpy array of floats, with length betamax.
soft_dynrange: D-by-2 numpy array of floats.
par_start: 1d (shapeless) numpy array.
name: the name of the dynamical system.
tune_beta: the current beta value that is undergoing stepwise tuning.
Rm: float.
burn: float. Note that this is the proportion of HMC samples that
are thrown away in each beta, which is different from the
'burndata' switch.
Returns
------
    burn: float.
    Rm: float.
Rf: betamax-by-D numpy array of floats.
eta_avg: 1d (shapeless) numpy array of floats, with length betamax.
acceptance: 1d (shapeless) numpy array of floats, with length betamax.
action: betamax-by-(max(n_iter)+2) numpy array of floats.
action_meanpath: 1d (shapeless) numpy array of floats, with length betamax.
ME_meanpath: 1d (shapeless) numpy array of floats, with length betamax.
FE_meanpath: 1d (shapeless) numpy array of floats, with length betamax.
X_init: betamax-by-D-by-M numpy array of floats.
X_gd: betamax-by-D-by-M numpy array of floats.
X_mean: betamax-by-D-by-M numpy array of floats.
par_history: betamax-by-(max(n_iter)+2)-by-len(par_start) numpy array
of floats.
par_mean: betamax-by-len(par_start) numpy array of floats.
Xfinal_history: betamax-by-(max(n_iter)+2)-by-D numpy array of floats.
"""
# create some utility variables
Rf = Rf0 * (alpha ** np.arange(betamax))[:, np.newaxis]
unobsdim = np.int64(np.setdiff1d(np.arange(D), obsdim))
obs_ind = -np.ones(D, dtype='int64')
for l in range(len(obsdim)):
obs_ind[obsdim[l]] = l
mass_X = np.zeros((betamax,D,M))
mass_par = np.zeros((betamax,len(par_start)))
for beta in range(betamax):
mass_X[beta, obsdim, :] = mass[beta, 0]
mass_X[beta, unobsdim, :] = mass[beta, 1]
mass_par[beta, :] = mass[beta, 2]
# initialize the remaining output variables
eta_avg = np.zeros(betamax)
acceptance = np.zeros(betamax)
action = np.zeros((betamax,np.max(n_iter)+2))
action_meanpath = np.zeros(betamax)
ME_meanpath = np.zeros(betamax)
FE_meanpath = np.zeros(betamax)
X_init = np.zeros((betamax,D,M))
X_gd = np.zeros((betamax,D,M))
X_mean = np.zeros((betamax,D,M))
par_history = np.zeros((betamax,np.max(n_iter)+2,len(par_start)))
par_mean = np.zeros((betamax,len(par_start)))
Xfinal_history = np.zeros((betamax,np.max(n_iter)+2,D))
# bring static variables to GPU
d_stimuli = cuda.to_device(stimuli)
d_Y = cuda.to_device(Y)
d_obsdim = cuda.to_device(obsdim)
d_obs_ind = cuda.to_device(obs_ind)
# initialize device arrays that will be used as outputs for the kernels
d_field = cuda.device_array((D,M))
d_jacobian = cuda.device_array((D,D,M))
d_dfield_dpar = cuda.device_array((D,len(par_start),M))
d_diff = cuda.device_array((D,M-1))
d_dAdX = cuda.device_array((D,M))
d_dAdpar = cuda.device_array((len(par_start),))
d_action = cuda.device_array((1,))
# stepwise tuning===========================================================
if tune_beta == 0:
# perform dynamic initialization, i.e., initialize X and par for beta=0
X_init[0, :, 0] \
= np.random.uniform(soft_dynrange[:, 0], soft_dynrange[:, 1], (D,))
X_init[0, obsdim, 0] = Y[:, 0]
d_par_start = cuda.to_device(par_start)
d_field_m = cuda.device_array((D,1))
for m in range(M-1):
d_X_m = cuda.to_device(X_init[0][:, [m]])
d_stimuli_m = cuda.to_device(stimuli[:, [m]])
k__field[(16,32), (2,128)](d_X_m, d_par_start, d_stimuli_m,
d_field_m)
F = dt / 2 * d_field_m.copy_to_host()
d_X_m = cuda.to_device(X_init[0][:, [m]]+F)
k__field[(16,32), (2,128)](d_X_m, d_par_start, d_stimuli_m,
d_field_m)
X_init[0][:, [m+1]] \
= X_init[0][:, [m]] + dt * d_field_m.copy_to_host()
X_init[0, obsdim, m+1] = Y[:, m+1]
par_history[0, 0, :] = par_start
else:
# extract results from the previous beta
file \
= np.load(Path.cwd()/'user_results'/f'tune_{name}_{tune_beta-1}.npz')
X_init = file['X_mean']
par_history[0, 0, :] = file['par_mean'][0, :]
file.close()
#===========================================================================
# do PAHMC
for beta in range(betamax):
print('-------------------------------------------------------------')
print(f'beta = {beta}:\n')
# initialize device arrays specific to current beta
d_Rf_beta = cuda.to_device(Rf[beta, :])
d_mass_X_beta = cuda.to_device(mass_X[beta, :, :])
d_mass_par_beta = cuda.to_device(mass_par[beta, :])
# initialize action and Xfinal_history (2 out of 4) for current beta
d_X_init_beta = cuda.to_device(X_init[beta, :, :])
d_par_init_beta = cuda.to_device(par_history[beta, 0, :])
k__field[(16,32), (2,128)](d_X_init_beta, d_par_init_beta, d_stimuli,
d_field)
k__zeros1d[40, 256](d_action)
cuda.synchronize()
k__action[(16,32), (16,16)](d_X_init_beta, d_field, d_Rf_beta, d_Y, dt,
d_obsdim, Rm, d_action)
action[beta, 0] = d_action.copy_to_host()[0]
Xfinal_history[beta, 0, :] = X_init[beta, :, -1]
# exploration - gradient descent
X_gd[beta, :, :], par_history[beta, 1, :], action[beta, 1], eta \
= descend(k__field, k__jacobian, k__dfield_dpar,
X_init[beta, :, :], par_history[beta, 0, :], Rf[beta, :],
d_stimuli, d_Y, dt, d_obsdim, d_obs_ind, Rm)
eta_avg[beta] = np.mean(eta)
Xfinal_history[beta, 1, :] = X_gd[beta, :, -1]
# exploitation - Hamiltonian Monte Carlo
t0 = time.perf_counter()
errcount = 0
printflag = 0
X0 = X_gd[beta, :, :]
par0 = par_history[beta, 1, :]
for n in range(2, n_iter[beta]+2):
print(f'\r Performing calculations... (step={n-1})', end='')
# call HMC
X, par, action[beta, n], accept, errflag \
= hmc(X0, par0, action[beta, n-1],
beta, d_Rf_beta, d_mass_X_beta, d_mass_par_beta,
D, M, dt, obsdim, unobsdim, Rm,
epsilon, S, mass, scaling, mass_X, mass_par,
k__field, k__jacobian, k__dfield_dpar,
d_stimuli, d_Y, d_obsdim, d_obs_ind,
d_field, d_jacobian, d_dfield_dpar,
d_diff, d_dAdX, d_dAdpar, d_action)
X0 = X
par0 = par
# sanity check
if errflag == 1:
errcount += 1
if errcount == 5 and printflag == 0:
print('\n WARNING: got bad values when performing '
+'leapfrog simulations!')
printflag = 1
else:
errcount = 0
# keep results
acceptance[beta] += accept
if n - 1 > burn * n_iter[beta]:
X_mean[beta, :, :] += X
par_mean[beta, :] += par
Xfinal_history[beta, n, :] = X[:, -1]
par_history[beta, n, :] = par
print(f'\r Performing calculations... '
+f'finished in {time.perf_counter()-t0:.2f} seconds;\n')
# finalize acceptance rate and mean path for current beta
acceptance[beta] /= n_iter[beta]
X_mean[beta, :, :] /= np.ceil((1-burn)*n_iter[beta])
par_mean[beta, :] /= np.ceil((1-burn)*n_iter[beta])
# calculate action, measurement and model errors from mean path
d_X_mean_beta = cuda.to_device(X_mean[beta, :, :])
d_par_mean_beta = cuda.to_device(par_mean[beta, :])
k__field[(16,32), (2,128)](d_X_mean_beta, d_par_mean_beta, d_stimuli,
d_field)
k__zeros1d[40, 256](d_action)
cuda.synchronize()
k__action[(16,32), (16,16)](d_X_mean_beta, d_field, d_Rf_beta, d_Y, dt,
d_obsdim, Rm, d_action)
action_meanpath[beta] = d_action.copy_to_host()[0]
ME_meanpath[beta] = Rm / 2 / M * np.sum((X_mean[beta, obsdim, :]-Y)**2)
field_mean = d_field.copy_to_host()
fX_mean = X_mean[beta, :, :M-1] \
+ dt / 2 * (field_mean[:, 1:] + field_mean[:, :M-1])
FE_meanpath[beta] \
= np.sum(Rf[beta, :]/2/M\
*np.sum((X_mean[beta, :, 1:]-fX_mean)**2, axis=1))
# print action_meanpath and FE_meanpath for current beta
print(f' action (mean path) = {action_meanpath[beta]};')
print(f' model error (mean path) = {FE_meanpath[beta]}.\n')
# initialize X and par (2 out of 4) for next beta
if beta != betamax - 1:
X_init[beta+1, :, :] = X_mean[beta, :, :]
par_history[beta+1, 0, :] = par_mean[beta, :]
return burn, Rm, Rf, eta_avg, acceptance, \
action, action_meanpath, ME_meanpath, FE_meanpath, \
X_init, X_gd, X_mean, par_history, par_mean, Xfinal_history
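# Illustrative sketch (not part of the original module): a hypothetical call to anneal(),
# assuming the CUDA kernels, the (D, M) stimuli/data arrays and the hyperparameter arrays
# have already been prepared exactly as described in the docstring above. All names here
# are placeholders, and the call is kept commented out on purpose.
#
#   burn, Rm, Rf, eta_avg, acceptance, \
#       action, action_meanpath, ME_meanpath, FE_meanpath, \
#       X_init, X_gd, X_mean, par_history, par_mean, Xfinal_history \
#       = anneal(k__field, k__jacobian, k__dfield_dpar, stimuli, Y,
#                D, M, obsdim, dt, Rf0, alpha, betamax,
#                n_iter, epsilon, S, mass, scaling,
#                soft_dynrange, par_start, name='lorenz96', tune_beta=0)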
def hmc(X0, par0, action0,
beta, d_Rf_beta, d_mass_X_beta, d_mass_par_beta,
D, M, dt, obsdim, unobsdim, Rm,
epsilon, S, mass, scaling, mass_X, mass_par,
k__field, k__jacobian, k__dfield_dpar,
d_stimuli, d_Y, d_obsdim, d_obs_ind,
d_field, d_jacobian, d_dfield_dpar,
d_diff, d_dAdX, d_dAdpar, d_action):
"""
This function generates one HMC proposal per call.
Inputs
------
X0: D-by-M numpy array of floats.
par0: 1d (shapeless) numpy array of floats.
action0: float.
beta: integer.
d_Rf_beta: 1d (shapeless) device array of floats, with length D.
d_mass_X_beta: D-by-M device array of floats.
d_mass_par_beta: 1d (shapeless) device array of floats, with length
len(par0).
...
unobsdim: 1d (shapeless) numpy array of integers.
...
mass_X: betamax-by-D-by-M numpy array of floats.
mass_par: betamax-by-len(par0) array of floats.
...
d_stimuli: D-by-M device array of floats.
d_Y: len(obsdim)-by-M device array of floats.
    d_obsdim: 1d (shapeless) device array of integers.
    d_obs_ind: 1d (shapeless) device array of integers, with length D.
d_field: D-by-M device array of floats.
d_jacobian: D-by-D-by-M device array of floats.
d_dfield_dpar: D-by-len(par0)-by-M device array of floats.
    d_diff: D-by-(M-1) device array of floats.
d_dAdX: D-by-M device array of floats.
d_dAdpar: 1d (shapeless) device array of floats, with length
len(par0).
d_action: 1d (shapeless) device array of float, with length 1.
Returns
------
X: D-by-M numpy array of floats.
par: 1d (shapeless) numpy array of floats.
action: float.
accept: integer.
errflag: integer.
"""
# generate initial momenta
pX0, ppar0 = pre_process(beta, D, M, obsdim, unobsdim, mass, len(par0))
# bring phase space variables to GPU
d_X = cuda.to_device(X0)
d_par = cuda.to_device(par0)
d_pX = cuda.to_device(pX0)
d_ppar = cuda.to_device(ppar0)
    # initialize candidate action
k__zeros1d[40, 256](d_action)
# Hamiltonian dynamics - half step for momenta
k__field[(16,32), (2,128)](d_X, d_par, d_stimuli, d_field)
cuda.synchronize()
k__diff[(32,16), (2,128)](d_X, d_field, dt, d_diff)
k__jacobian[(4,4,32), (2,2,64)](d_X, d_par, d_jacobian)
k__dfield_dpar[(4,4,32), (2,2,64)](d_X, d_par, d_dfield_dpar)
k__zeros1d[40, 256](d_dAdpar)
cuda.synchronize()
k__dAdX[(32,16), (2,128)](d_X, d_diff, d_jacobian, d_Rf_beta,
scaling[beta], d_Y, dt, d_obsdim,
d_obs_ind, Rm, d_dAdX)
k__dAdpar[(4,4,32), (2,2,64)](d_X, d_diff, d_dfield_dpar,
d_Rf_beta, scaling[beta], dt, d_dAdpar)
cuda.synchronize()
k__linearop2d[(32,16), (2,128)](d_pX, -epsilon[beta]/2, d_dAdX, d_pX)
k__linearop1d[40, 256](d_ppar, -epsilon[beta]/2, d_dAdpar, d_ppar)
cuda.synchronize()
# Hamiltonian dynamics - full steps for X, par, and momenta
for i in range(S[beta]):
# full step for X, par
k__leapfrog_X[(32,16), (2,128)](d_pX, epsilon[beta], d_mass_X_beta, d_X)
k__leapfrog_par[40, 256](d_ppar, epsilon[beta], d_mass_par_beta, d_par)
cuda.synchronize()
# full step for momenta except at the end of trajectory
k__field[(16,32), (2,128)](d_X, d_par, d_stimuli, d_field)
cuda.synchronize()
k__diff[(32,16), (2,128)](d_X, d_field, dt, d_diff)
k__jacobian[(4,4,32), (2,2,64)](d_X, d_par, d_jacobian)
k__dfield_dpar[(4,4,32), (2,2,64)](d_X, d_par, d_dfield_dpar)
k__zeros1d[40, 256](d_dAdpar)
cuda.synchronize()
k__dAdX[(32,16), (2,128)](d_X, d_diff, d_jacobian, d_Rf_beta,
scaling[beta], d_Y, dt, d_obsdim,
d_obs_ind, Rm, d_dAdX)
k__dAdpar[(4,4,32), (2,2,64)](d_X, d_diff, d_dfield_dpar,
d_Rf_beta, scaling[beta], dt, d_dAdpar)
cuda.synchronize()
if i != S[beta] - 1:
k__linearop2d[(32,16), (2,128)](d_pX, -epsilon[beta], d_dAdX, d_pX)
k__linearop1d[40, 256](d_ppar, -epsilon[beta], d_dAdpar, d_ppar)
cuda.synchronize()
# Hamiltonian dynamics - half step for momenta
k__linearop2d[(32,16), (2,128)](d_pX, -epsilon[beta]/2, d_dAdX, d_pX)
k__linearop1d[40, 256](d_ppar, -epsilon[beta]/2, d_dAdpar, d_ppar)
cuda.synchronize()
# bring momenta back to CPU
pX = d_pX.copy_to_host()
ppar = d_ppar.copy_to_host()
# get candidate action
k__action[(16,32), (16,16)](d_X, d_field, d_Rf_beta, d_Y, dt, d_obsdim, Rm,
d_action)
action_cand = d_action.copy_to_host()[0]
# calculate change in Hamiltonian
dH = post_process(pX0, ppar0, pX, ppar, action0, action_cand, beta,
D, M, mass_X, mass_par, scaling)
# check for numerical issues
    if np.isnan(action_cand) == False and np.isinf(action_cand) == False:
import unittest
from context import modest as md
import numpy as np
from scipy.linalg import block_diag
from scipy.stats import multivariate_normal as mvn
class TestModularFilters(unittest.TestCase):
def setUp(self):
class simpleState(md.substates.SubState):
def __init__(self, dimension, stateVectorHistory, covarianceStorage='covariance'):
if not isinstance(stateVectorHistory['covariance'], md.utils.covarianceContainer):
stateVectorHistory['covariance'] = md.utils.covarianceContainer(
stateVectorHistory['covariance'],covarianceStorage
)
super().__init__(stateDimension=dimension, stateVectorHistory=stateVectorHistory)
self.simpleState = simpleState
class oneDPositionVelocity(md.substates.SubState):
def __init__(self, objectID, stateVectorHistory,covarianceStorage='covariance'):
if not isinstance(stateVectorHistory['covariance'], md.utils.covarianceContainer):
stateVectorHistory['covariance'] = md.utils.covarianceContainer(
stateVectorHistory['covariance'],covarianceStorage
)
super().__init__(stateDimension=2, stateVectorHistory=stateVectorHistory)
self.stateVector = stateVectorHistory['stateVector']
self.objectID = objectID
self.covarianceStorage = covarianceStorage
def storeStateVector(self, svDict):
xPlus = svDict['stateVector']
aPriori = svDict['aPriori']
if aPriori is False:
self.stateVector = xPlus
svDict['stateVector'] = self.stateVector
super().storeStateVector(svDict)
def timeUpdate(self, dT, dynamics=None):
F = np.array([[1, dT],[0, 1]])
dT2 = np.square(dT)
dT3 = np.power(dT, 3)
dT4 = np.power(dT, 4)
if self.covariance().form == 'covariance':
Q = np.array([[dT4/4, dT3/2],[dT3/2, dT2]])
elif self.covariance().form == 'cholesky':
Q = np.array([[dT2/2,0],[dT,0]])
accelKey = self.objectID + 'acceleration'
if dynamics is not None and accelKey in dynamics:
acceleration = dynamics[accelKey]['value']
accVar = dynamics[accelKey]['var']
else:
acceleration = 0
accVar = 0
self.stateVector = F.dot(self.stateVector) + np.array([0, acceleration])
if self.covariance().form == 'covariance':
Q = md.utils.covarianceContainer(Q * accVar, 'covariance')
elif self.covariance().form == 'cholesky':
Q = md.utils.covarianceContainer(Q * np.sqrt(accVar), 'cholesky')
else:
                    raise ValueError('unrecognized covariance')
return {'F': F, 'Q': Q}
def getMeasurementMatrices(self, measurement, source=None):
HDict = {}
RDict = {}
dyDict = {}
if isinstance(source, oneDObjectMeasurement) and source.objectID == self.objectID:
if 'position' in measurement:
H = np.array([[1, 0]])
dY = measurement['position']['value'] - H.dot(self.stateVector)
HDict['%s position' %self.objectID] = H
RDict['%s position' %self.objectID] = np.array(
[[measurement['position']['var']]]
)
dyDict['%s position' %self.objectID] = dY
if 'velocity' in measurement:
H = np.array([[0, 1]])
dY = measurement['velocity']['value'] - H.dot(self.stateVector)
HDict['%s velocity' %self.objectID] = H
RDict['%s velocity' %self.objectID] = np.array(
[[measurement['velocity']['var']]]
)
dyDict['%s velocity' %self.objectID] = dY
return {'H': HDict, 'R': RDict, 'dY': dyDict}
self.oneDPositionVelocity = oneDPositionVelocity
class oneDObjectMeasurement(md.signals.SignalSource):
def __init__(self, objectID):
self.objectID = objectID
return
def computeAssociationProbability(self, measurement, stateDict, validationThreshold=0):
myMeasMat = stateDict[self.objectID]['stateObject'].getMeasurementMatrices(measurement, source=self)
dY = None
R = None
H = None
for key in myMeasMat['dY']:
if H is None:
H = myMeasMat['H'][key]
R = myMeasMat['R'][key]
dY = myMeasMat['dY'][key]
else:
H = np.vstack([H, myMeasMat['H'][key]])
R = block_diag(R, myMeasMat['R'][key])
dY = np.append(dY, myMeasMat['dY'][key])
if dY is not None:
P = stateDict[self.objectID]['stateObject'].covariance()
Pval = P.convertCovariance('covariance').value
# if P.form == 'cholesky':
# Pval = P.value.dot(P.value.transpose())
# elif P.form == 'covariance':
# Pval = P.value
# else:
                    # raise ValueError('Unrecognized covariance specifier %s' %P.form)
S = H.dot(Pval).dot(H.transpose()) + R
myProbability = mvn.pdf(dY, cov=S)
else:
myProbability = 0
return myProbability
self.oneDObjectMeasurement = oneDObjectMeasurement
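    # Note on the fixtures above (added for clarity): they model a 1-D constant-velocity
    # target. The state is [position, velocity], the transition matrix is F = [[1, dT], [0, 1]],
    # and the process noise Q comes from a white acceleration input with variance accVar
    # (expressed either in covariance or in Cholesky form). oneDObjectMeasurement scores a
    # candidate measurement with a multivariate-normal pdf over the innovation, using
    # S = H P H^T + R as the innovation covariance.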
def testAddStates(self):
# Create a simple state class to add to the filter
        stateLength1 = np.random.randint(0, 10)
import numpy as np
from typing import Iterable, Tuple
from collections import namedtuple
import scipy.stats as stats
from abito.lib.stats.plain import *
__all__ = [
't_test_from_stats',
't_test',
't_test_1samp',
'mann_whitney_u_test_from_stats',
'mann_whitney_u_test',
'bootstrap_test',
'shapiro_test',
'median_test',
'levene_test',
'mood_test',
]
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not np.nan.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
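# Note (added for clarity): the function above is the Welch (unequal-variance) denominator.
# Its degrees of freedom follow the Welch-Satterthwaite approximation,
#   df = (v1/n1 + v2/n2)**2 / ((v1/n1)**2/(n1-1) + (v2/n2)**2/(n2-1)),
# whereas _equal_var_ttest_denom below pools the two sample variances under the classical
# equal-variance assumption.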
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
TTestResult = namedtuple('TTestResult', [
'statistic',
'p_value',
'mean_diff',
'mean_diff_std',
])
TTestResult.__new__.__defaults__ = (np.nan,) * len(TTestResult._fields)
def _t_test_from_stats(mean1, mean2, denom, df) -> TTestResult:
d = mean1 - mean2
    with np.errstate(divide='ignore', invalid='ignore'):
import numpy as np
from colour.characterisation import polynomial_expansion_Finlayson2015, matrix_colour_correction_Finlayson2015
from .Node import Node
from typing import Optional, Any
from numpy.typing import NDArray
from camera_match.optimise import NodeOptimiser
class LinearMatrix(Node):
def __init__(self, matrix: Optional[NDArray[Any]] = None):
self.matrix = matrix
if self.matrix is None:
self.matrix = self.identity()
def solve(self, source: NDArray[Any], target: NDArray[Any]):
# Setting Matrix with Moore-Penrose solution for speed
self.matrix = matrix_colour_correction_Finlayson2015(source, target, degree=1)
optimiser = NodeOptimiser(self.apply_matrix, self.matrix)
self.matrix = optimiser.solve(source, target)
def apply(self, RGB: NDArray[Any]) -> NDArray[Any]:
return self.apply_matrix(RGB, self.matrix)
@staticmethod
def identity() -> NDArray[Any]:
return np.identity(3)
@staticmethod
def apply_matrix(RGB: NDArray[Any], matrix: NDArray[Any]) -> NDArray[Any]:
shape = RGB.shape
RGB = np.reshape(RGB, (-1, 3))
return np.reshape(np.transpose(np.dot(matrix, np.transpose(RGB))), shape)
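# Illustrative usage sketch (not from the original source). source_rgb and target_rgb stand
# for hypothetical (N, 3) float arrays of corresponding samples from the two cameras:
#
#   node = LinearMatrix()
#   node.solve(source_rgb, target_rgb)   # seed with the Finlayson (2015) fit, then refine
#   corrected = node.apply(source_rgb)   # returns an array with the same shape as the input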
class RootPolynomialMatrix(Node):
def __init__(self, matrix: Optional[NDArray[Any]] = None, degree: int=2):
if degree > 4 or degree < 1:
raise ValueError(
f"Degree for Root Polynomial Matrix must be between 1 and 4."
)
self.matrix = matrix
self.degree = degree
if self.matrix is None:
self.matrix = self.identity(self.degree)
def solve(self, source: NDArray[Any], target: NDArray[Any]):
# Setting Matrix with Moore-Penrose solution for speed
self.matrix = matrix_colour_correction_Finlayson2015(source, target, degree=self.degree, root_polynomial_expansion=True)
optimiser = NodeOptimiser(self.apply_matrix, self.matrix, fn_args=(self.degree))
self.matrix = optimiser.solve(source, target)
def apply(self, RGB: NDArray[Any]) -> NDArray[Any]:
return self.apply_matrix(RGB, self.matrix, self.degree)
@staticmethod
def identity(degree: int) -> NDArray[Any]:
polynomial_expansion = {
1: np.identity(3),
2: np.hstack((np.identity(3), np.zeros((3, 3)))),
3: np.hstack((np.identity(3), np.zeros((3, 10)))),
4: np.hstack((np.identity(3), np.zeros((3, 19)))),
}
return polynomial_expansion[degree]
@staticmethod
def apply_matrix(RGB: NDArray[Any], matrix: NDArray[Any], degree: int) -> NDArray[Any]:
shape = RGB.shape
RGB = np.reshape(RGB, (-1, 3))
RGB_e = polynomial_expansion_Finlayson2015(RGB, degree, root_polynomial_expansion=True)
return np.reshape(np.transpose(np.dot(matrix, np.transpose(RGB_e))), shape)
class TetrahedralMatrix(Node):
def __init__(self, matrix: Optional[NDArray[Any]] = None):
self.matrix = matrix
if self.matrix is None:
self.matrix = self.identity()
def solve(self, source: NDArray[Any], target: NDArray[Any]):
optimiser = NodeOptimiser(self.apply_matrix, self.matrix)
self.matrix = optimiser.solve(source, target)
def apply(self, RGB: NDArray[Any]) -> NDArray[Any]:
return self.apply_matrix(RGB, self.matrix)
@staticmethod
def identity() -> NDArray[Any]:
return np.array([[1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 1, 1], [0, 0, 1], [1, 0, 1]])
@staticmethod
def apply_matrix(RGB: NDArray[Any], matrix: NDArray[Any]) -> NDArray[Any]:
def tetra_case(index, r, matrix_r, g, matrix_g, b, matrix_b, constant):
R = np.multiply.outer(r[index], matrix_r)
G = np.multiply.outer(g[index], matrix_g)
            B = np.multiply.outer(b[index], matrix_b)
from parcels import (FieldSet, ScipyParticle, JITParticle, Variable, ErrorCode)
from parcels.particlefile import _set_calendar
from parcels.tools.converters import _get_cftime_calendars, _get_cftime_datetimes
from parcels import ParticleSetSOA, ParticleFileSOA, KernelSOA # noqa
from parcels import ParticleSetAOS, ParticleFileAOS, KernelAOS # noqa
import numpy as np
import pytest
import os
from netCDF4 import Dataset
import cftime
import random as py_random
ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
pset_type = {'soa': {'pset': ParticleSetSOA, 'pfile': ParticleFileSOA, 'kernel': KernelSOA},
'aos': {'pset': ParticleSetAOS, 'pfile': ParticleFileAOS, 'kernel': KernelAOS}}
def fieldset(xdim=40, ydim=100):
U = np.zeros((ydim, xdim), dtype=np.float32)
V = np.zeros((ydim, xdim), dtype=np.float32)
lon = np.linspace(0, 1, xdim, dtype=np.float32)
    lat = np.linspace(-60, 60, ydim, dtype=np.float32)
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
from __future__ import division, absolute_import, print_function
__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
"set_printoptions", "get_printoptions", "printoptions",
"format_float_positional", "format_float_scientific"]
__docformat__ = 'restructuredtext'
#
# Written by <NAME> <<EMAIL>>
# last revision: 1996-3-13
# modified by <NAME> 1997-3-3 for repr's and str's (and other details)
# and by <NAME> 2000-4-1 for numarray
# and by <NAME> 2005-8-22 for numpy
# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
# scalars but for different purposes. scalartypes.c.src has str/reprs for when
# the scalar is printed on its own, while arrayprint.py has strs for when
# scalars are printed inside an ndarray. Only the latter strs are currently
# user-customizable.
import sys
import functools
import numbers
if sys.version_info[0] >= 3:
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
else:
try:
from thread import get_ident
except ImportError:
from dummy_thread import get_ident
import numpy as np
from . import numerictypes as _nt
from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat
from . import multiarray
from .multiarray import (array, dragon4_positional, dragon4_scientific,
datetime_as_string, datetime_data, ndarray,
set_legacy_print_mode)
from .fromnumeric import ravel, any
from .numeric import concatenate, asarray, errstate
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
flexible)
from .overrides import array_function_dispatch, set_module
import warnings
import contextlib
_format_options = {
'edgeitems': 3, # repr N leading and trailing items of each dimension
'threshold': 1000, # total items > triggers array summarization
'floatmode': 'maxprec',
'precision': 8, # precision of floating point representations
'suppress': False, # suppress printing small floating values in exp format
'linewidth': 75,
'nanstr': 'nan',
'infstr': 'inf',
'sign': '-',
'formatter': None,
'legacy': False}
def _make_options_dict(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
sign=None, formatter=None, floatmode=None, legacy=None):
""" make a dictionary out of the non-None arguments, plus sanity checks """
options = {k: v for k, v in locals().items() if v is not None}
if suppress is not None:
options['suppress'] = bool(suppress)
modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
if floatmode not in modes + [None]:
raise ValueError("floatmode option must be one of " +
", ".join('"{}"'.format(m) for m in modes))
if sign not in [None, '-', '+', ' ']:
raise ValueError("sign option must be one of ' ', '+', or '-'")
if legacy not in [None, False, '1.13']:
warnings.warn("legacy printing option can currently only be '1.13' or "
"`False`", stacklevel=3)
if threshold is not None:
# forbid the bad threshold arg suggested by stack overflow, gh-12351
if not isinstance(threshold, numbers.Number):
raise TypeError("threshold must be numeric")
if np.isnan(threshold):
raise ValueError("threshold must be non-NAN, try "
"sys.maxsize for untruncated representation")
return options
@set_module('numpy')
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
formatter=None, sign=None, floatmode=None, **kwarg):
"""
Set printing options.
These options determine the way floating point numbers, arrays and
other NumPy objects are displayed.
Parameters
----------
precision : int or None, optional
Number of digits of precision for floating point output (default 8).
May be `None` if `floatmode` is not `fixed`, to print as many digits as
necessary to uniquely specify the value.
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
To always use the full repr without summarization, pass `sys.maxsize`.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension (default 3).
linewidth : int, optional
The number of characters per line for the purpose of inserting
line breaks (default 75).
suppress : bool, optional
If True, always print floating point numbers using fixed point
notation, in which case numbers equal to zero in the current precision
will print as zero. If False, then scientific notation is used when
absolute value of the smallest number is < 1e-4 or the ratio of the
maximum absolute value to the minimum is > 1e3. The default is False.
nanstr : str, optional
String representation of floating point not-a-number (default nan).
infstr : str, optional
String representation of floating point infinity (default inf).
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values. (default '-')
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are:
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'object' : `np.object_` arrays
- 'str' : all other strings
Other keys that can be used to set a group of types at once are:
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values
(default maxprec_equal):
* 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
* 'unique': Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
* 'maxprec': Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
* 'maxprec_equal': Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
position of floats and different behavior for 0d arrays. If set to
`False`, disables legacy mode. Unrecognized strings will be ignored
with a warning for forward compatibility.
.. versionadded:: 1.14.0
See Also
--------
get_printoptions, printoptions, set_string_function, array2string
Notes
-----
`formatter` is always reset with a call to `set_printoptions`.
Use `printoptions` as a context manager to set the values temporarily.
Examples
--------
Floating point precision can be set:
>>> np.set_printoptions(precision=4)
>>> np.array([1.123456789])
[1.1235]
Long arrays can be summarised:
>>> np.set_printoptions(threshold=5)
>>> np.arange(10)
array([0, 1, 2, ..., 7, 8, 9])
Small results can be suppressed:
>>> eps = np.finfo(float).eps
>>> x = np.arange(4.)
>>> x**2 - (x + eps)**2
array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
>>> np.set_printoptions(suppress=True)
>>> x**2 - (x + eps)**2
array([-0., -0., 0., 0.])
A custom formatter can be used to display array elements as desired:
>>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
>>> x = np.arange(3)
>>> x
array([int: 0, int: -1, int: -2])
>>> np.set_printoptions() # formatter gets reset
>>> x
array([0, 1, 2])
To put back the default options, you can use:
>>> np.set_printoptions(edgeitems=3, infstr='inf',
... linewidth=75, nanstr='nan', precision=8,
... suppress=False, threshold=1000, formatter=None)
Also to temporarily override options, use `printoptions` as a context manager:
>>> with np.printoptions(precision=2, suppress=True, threshold=5):
... np.linspace(0, 10, 10)
array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ])
"""
legacy = kwarg.pop('legacy', None)
if kwarg:
msg = "set_printoptions() got unexpected keyword argument '{}'"
raise TypeError(msg.format(kwarg.popitem()[0]))
opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
suppress, nanstr, infstr, sign, formatter,
floatmode, legacy)
# formatter is always reset
opt['formatter'] = formatter
_format_options.update(opt)
# set the C variable for legacy mode
if _format_options['legacy'] == '1.13':
set_legacy_print_mode(113)
# reset the sign option in legacy mode to avoid confusion
_format_options['sign'] = '-'
elif _format_options['legacy'] is False:
set_legacy_print_mode(0)
@set_module('numpy')
def get_printoptions():
"""
Return the current print options.
Returns
-------
print_opts : dict
Dictionary of current print options with keys
- precision : int
- threshold : int
- edgeitems : int
- linewidth : int
- suppress : bool
- nanstr : str
- infstr : str
- formatter : dict of callables
- sign : str
For a full description of these options, see `set_printoptions`.
See Also
--------
set_printoptions, printoptions, set_string_function
"""
return _format_options.copy()
@set_module('numpy')
@contextlib.contextmanager
def printoptions(*args, **kwargs):
"""Context manager for setting print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `set_printoptions` for the full description of
available options.
Examples
--------
>>> from numpy.testing import assert_equal
>>> with np.printoptions(precision=2):
... np.array([2.0]) / 3
array([0.67])
The `as`-clause of the `with`-statement gives the current print options:
>>> with np.printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
See Also
--------
set_printoptions, get_printoptions
"""
opts = np.get_printoptions()
try:
np.set_printoptions(*args, **kwargs)
yield np.get_printoptions()
finally:
np.set_printoptions(**opts)
def _leading_trailing(a, edgeitems, index=()):
"""
Keep only the N-D corners (leading and trailing edges) of an array.
Should be passed a base-class ndarray, since it makes no guarantees about
preserving subclasses.
"""
axis = len(index)
if axis == a.ndim:
return a[index]
if a.shape[axis] > 2*edgeitems:
return concatenate((
_leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
_leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
), axis=axis)
else:
return _leading_trailing(a, edgeitems, index + np.index_exp[:])
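# For illustration (added comment): with the default edgeitems=3, a call such as
# _leading_trailing(np.arange(10), 3) keeps only the corners, i.e. array([0, 1, 2, 7, 8, 9]);
# an axis whose length is at most 2*edgeitems is returned whole.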
def _object_format(o):
""" Object arrays containing lists should be printed unambiguously """
if type(o) is list:
fmt = 'list({!r})'
else:
fmt = '{!r}'
return fmt.format(o)
def repr_format(x):
return repr(x)
def str_format(x):
return str(x)
def _get_formatdict(data, **opt):
prec, fmode = opt['precision'], opt['floatmode']
supp, sign = opt['suppress'], opt['sign']
legacy = opt['legacy']
# wrapped in lambdas to avoid taking a code path with the wrong type of data
formatdict = {
'bool': lambda: BoolFormat(data),
'int': lambda: IntegerFormat(data),
'float': lambda:
FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
'longfloat': lambda:
FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
'complexfloat': lambda:
ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
'longcomplexfloat': lambda:
ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
'datetime': lambda: DatetimeFormat(data, legacy=legacy),
'timedelta': lambda: TimedeltaFormat(data),
'object': lambda: _object_format,
'void': lambda: str_format,
'numpystr': lambda: repr_format,
'str': lambda: str}
# we need to wrap values in `formatter` in a lambda, so that the interface
# is the same as the above values.
def indirect(x):
return lambda: x
formatter = opt['formatter']
if formatter is not None:
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
for key in formatdict.keys():
formatdict[key] = indirect(formatter['all'])
if 'int_kind' in fkeys:
for key in ['int']:
formatdict[key] = indirect(formatter['int_kind'])
if 'float_kind' in fkeys:
for key in ['float', 'longfloat']:
formatdict[key] = indirect(formatter['float_kind'])
if 'complex_kind' in fkeys:
for key in ['complexfloat', 'longcomplexfloat']:
formatdict[key] = indirect(formatter['complex_kind'])
if 'str_kind' in fkeys:
for key in ['numpystr', 'str']:
formatdict[key] = indirect(formatter['str_kind'])
for key in formatdict.keys():
if key in fkeys:
formatdict[key] = indirect(formatter[key])
return formatdict
def _get_format_function(data, **options):
"""
find the right formatting function for the dtype_
"""
dtype_ = data.dtype
dtypeobj = dtype_.type
formatdict = _get_formatdict(data, **options)
if issubclass(dtypeobj, _nt.bool_):
return formatdict['bool']()
elif issubclass(dtypeobj, _nt.integer):
if issubclass(dtypeobj, _nt.timedelta64):
return formatdict['timedelta']()
else:
return formatdict['int']()
elif issubclass(dtypeobj, _nt.floating):
if issubclass(dtypeobj, _nt.longfloat):
return formatdict['longfloat']()
else:
return formatdict['float']()
elif issubclass(dtypeobj, _nt.complexfloating):
if issubclass(dtypeobj, _nt.clongfloat):
return formatdict['longcomplexfloat']()
else:
return formatdict['complexfloat']()
elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
return formatdict['numpystr']()
elif issubclass(dtypeobj, _nt.datetime64):
return formatdict['datetime']()
elif issubclass(dtypeobj, _nt.object_):
return formatdict['object']()
elif issubclass(dtypeobj, _nt.void):
if dtype_.names is not None:
return StructuredVoidFormat.from_data(data, **options)
else:
return formatdict['void']()
else:
return formatdict['numpystr']()
def _recursive_guard(fillvalue='...'):
"""
Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs
Decorates a function such that if it calls itself with the same first
argument, it returns `fillvalue` instead of recursing.
Largely copied from reprlib.recursive_repr
"""
def decorating_function(f):
repr_running = set()
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
return f(self, *args, **kwargs)
finally:
repr_running.discard(key)
return wrapper
return decorating_function
# gracefully handle recursive calls, when object arrays contain themselves
@_recursive_guard()
def _array2string(a, options, separator=' ', prefix=""):
# The formatter __init__s in _get_format_function cannot deal with
# subclasses yet, and we also need to avoid recursion issues in
# _formatArray with subclasses which return 0d arrays in place of scalars
data = asarray(a)
if a.shape == ():
a = data
if a.size > options['threshold']:
summary_insert = "..."
data = _leading_trailing(data, options['edgeitems'])
else:
summary_insert = ""
# find the right formatting function for the array
format_function = _get_format_function(data, **options)
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " "*len(prefix)
lst = _formatArray(a, format_function, options['linewidth'],
next_line_prefix, separator, options['edgeitems'],
summary_insert, options['legacy'])
return lst
def _array2string_dispatcher(
a, max_line_width=None, precision=None,
suppress_small=None, separator=None, prefix=None,
style=None, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix=None,
**kwarg):
return (a,)
@array_function_dispatch(_array2string_dispatcher, module='numpy')
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix="",
**kwarg):
"""
Return a string representation of an array.
Parameters
----------
a : array_like
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
Defaults to ``numpy.get_printoptions()['linewidth']``.
precision : int or None, optional
Floating point precision.
Defaults to ``numpy.get_printoptions()['precision']``.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
Defaults to ``numpy.get_printoptions()['suppress']``.
separator : str, optional
Inserted between elements.
prefix : str, optional
suffix: str, optional
The length of the prefix and suffix strings are used to respectively
align and wrap the output. An array is typically printed as::
prefix + array2string(a) + suffix
The output is left-padded by the length of the prefix string, and
wrapping is forced at the column ``max_line_width - len(suffix)``.
It should be noted that the content of prefix and suffix strings are
not included in the output.
style : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are:
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'void' : type `numpy.void`
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are:
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
Defaults to ``numpy.get_printoptions()['threshold']``.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension.
Defaults to ``numpy.get_printoptions()['edgeitems']``.
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values.
Defaults to ``numpy.get_printoptions()['sign']``.
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types.
Defaults to ``numpy.get_printoptions()['floatmode']``.
Can take the following values:
- 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
- 'unique': Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
- 'maxprec': Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
- 'maxprec_equal': Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
position of floats and different behavior for 0d arrays. If set to
`False`, disables legacy mode. Unrecognized strings will be ignored
with a warning for forward compatibility.
.. versionadded:: 1.14.0
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
This is a very flexible function; `array_repr` and `array_str` are using
`array2string` internally so keywords with the same name should work
identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> np.array2string(x, precision=2, separator=',',
... suppress_small=True)
'[0.,1.,2.,3.]'
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0 0x1 0x2]'
"""
legacy = kwarg.pop('legacy', None)
if kwarg:
msg = "array2string() got unexpected keyword argument '{}'"
raise TypeError(msg.format(kwarg.popitem()[0]))
overrides = _make_options_dict(precision, threshold, edgeitems,
max_line_width, suppress_small, None, None,
sign, formatter, floatmode, legacy)
options = _format_options.copy()
options.update(overrides)
if options['legacy'] == '1.13':
if style is np._NoValue:
style = repr
if a.shape == () and a.dtype.names is None:
return style(a.item())
elif style is not np._NoValue:
# Deprecation 11-9-2017 v1.14
warnings.warn("'style' argument is deprecated and no longer functional"
" except in 1.13 'legacy' mode",
DeprecationWarning, stacklevel=3)
if options['legacy'] != '1.13':
options['linewidth'] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
needs_wrap = len(line) + len(word) > line_width
if legacy != '1.13':
        # don't wrap lines if it won't help
if len(line) <= len(next_line_prefix):
needs_wrap = False
if needs_wrap:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
def _formatArray(a, format_function, line_width, next_line_prefix,
separator, edge_items, summary_insert, legacy):
"""formatArray is designed for two modes of operation:
1. Full output
2. Summarized output
"""
def recurser(index, hanging_indent, curr_width):
"""
By using this local function, we don't need to recurse with all the
arguments. Since this function is not created recursively, the cost is
not significant
"""
axis = len(index)
axes_left = a.ndim - axis
if axes_left == 0:
return format_function(a[index])
# when recursing, add a space to align with the [ added, and reduce the
# length of the line by 1
next_hanging_indent = hanging_indent + ' '
if legacy == '1.13':
next_width = curr_width
else:
next_width = curr_width - len(']')
a_len = a.shape[axis]
show_summary = summary_insert and 2*edge_items < a_len
if show_summary:
leading_items = edge_items
trailing_items = edge_items
else:
leading_items = 0
trailing_items = a_len
# stringify the array with the hanging indent on the first line too
s = ''
# last axis (rows) - wrap elements if they would not fit on one line
if axes_left == 1:
# the length up until the beginning of the separator / bracket
if legacy == '1.13':
elem_width = curr_width - len(separator.rstrip())
else:
elem_width = curr_width - max(len(separator.rstrip()), len(']'))
line = hanging_indent
for i in range(leading_items):
word = recurser(index + (i,), next_hanging_indent, next_width)
s, line = _extendLine(
s, line, word, elem_width, hanging_indent, legacy)
line += separator
if show_summary:
s, line = _extendLine(
s, line, summary_insert, elem_width, hanging_indent, legacy)
if legacy == '1.13':
line += ", "
else:
line += separator
for i in range(trailing_items, 1, -1):
word = recurser(index + (-i,), next_hanging_indent, next_width)
s, line = _extendLine(
s, line, word, elem_width, hanging_indent, legacy)
line += separator
if legacy == '1.13':
# width of the separator is not considered on 1.13
elem_width = curr_width
word = recurser(index + (-1,), next_hanging_indent, next_width)
s, line = _extendLine(
s, line, word, elem_width, hanging_indent, legacy)
s += line
# other axes - insert newlines between rows
else:
s = ''
line_sep = separator.rstrip() + '\n'*(axes_left - 1)
for i in range(leading_items):
nested = recurser(index + (i,), next_hanging_indent, next_width)
s += hanging_indent + nested + line_sep
if show_summary:
if legacy == '1.13':
# trailing space, fixed nbr of newlines, and fixed separator
s += hanging_indent + summary_insert + ", \n"
else:
s += hanging_indent + summary_insert + line_sep
for i in range(trailing_items, 1, -1):
nested = recurser(index + (-i,), next_hanging_indent,
next_width)
s += hanging_indent + nested + line_sep
nested = recurser(index + (-1,), next_hanging_indent, next_width)
s += hanging_indent + nested
# remove the hanging indent, and wrap in []
s = '[' + s[len(hanging_indent):] + ']'
return s
try:
# invoke the recursive part with an initial index and prefix
return recurser(index=(),
hanging_indent=next_line_prefix,
curr_width=line_width)
finally:
# recursive closures have a cyclic reference to themselves, which
# requires gc to collect (gh-10620). To avoid this problem, for
# performance and PyPy friendliness, we break the cycle:
recurser = None
def _none_or_positive_arg(x, name):
if x is None:
return -1
if x < 0:
raise ValueError("{} must be >= 0".format(name))
return x
class FloatingFormat(object):
""" Formatter for subtypes of np.floating """
def __init__(self, data, precision, floatmode, suppress_small, sign=False,
**kwarg):
# for backcompatibility, accept bools
if isinstance(sign, bool):
sign = '+' if sign else '-'
self._legacy = kwarg.get('legacy', False)
if self._legacy == '1.13':
# when not 0d, legacy does not support '-'
if data.shape != () and sign == '-':
sign = ' '
self.floatmode = floatmode
if floatmode == 'unique':
self.precision = None
else:
self.precision = precision
self.precision = _none_or_positive_arg(self.precision, 'precision')
self.suppress_small = suppress_small
self.sign = sign
self.exp_format = False
self.large_exponent = False
self.fillFormat(data)
def fillFormat(self, data):
# only the finite values are used to compute the number of digits
finite_vals = data[isfinite(data)]
# choose exponential mode based on the non-zero finite values:
abs_non_zero = absolute(finite_vals[finite_vals != 0])
if len(abs_non_zero) != 0:
max_val = np.max(abs_non_zero)
min_val = np.min(abs_non_zero)
with errstate(over='ignore'): # division can overflow
if max_val >= 1.e8 or (not self.suppress_small and
(min_val < 0.0001 or max_val/min_val > 1000.)):
self.exp_format = True
# do a first pass of printing all the numbers, to determine sizes
if len(finite_vals) == 0:
self.pad_left = 0
self.pad_right = 0
self.trim = '.'
self.exp_size = -1
self.unique = True
elif self.exp_format:
trim, unique = '.', True
if self.floatmode == 'fixed' or self._legacy == '1.13':
trim, unique = 'k', False
strs = (dragon4_scientific(x, precision=self.precision,
unique=unique, trim=trim, sign=self.sign == '+')
for x in finite_vals)
frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
self.exp_size = max(len(s) for s in exp_strs) - 1
self.trim = 'k'
self.precision = max(len(s) for s in frac_part)
# for back-compat with np 1.13, use 2 spaces & sign and full prec
if self._legacy == '1.13':
self.pad_left = 3
else:
# this should be only 1 or 2. Can be calculated from sign.
self.pad_left = max(len(s) for s in int_part)
# pad_right is only needed for nan length calculation
self.pad_right = self.exp_size + 2 + self.precision
self.unique = False
else:
# first pass printing to determine sizes
trim, unique = '.', True
if self.floatmode == 'fixed':
trim, unique = 'k', False
strs = (dragon4_positional(x, precision=self.precision,
fractional=True,
unique=unique, trim=trim,
sign=self.sign == '+')
for x in finite_vals)
int_part, frac_part = zip(*(s.split('.') for s in strs))
if self._legacy == '1.13':
self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
else:
self.pad_left = max(len(s) for s in int_part)
self.pad_right = max(len(s) for s in frac_part)
self.exp_size = -1
if self.floatmode in ['fixed', 'maxprec_equal']:
self.precision = self.pad_right
self.unique = False
self.trim = 'k'
else:
self.unique = True
self.trim = '.'
if self._legacy != '1.13':
# account for sign = ' ' by adding one to pad_left
if self.sign == ' ' and not any(np.signbit(finite_vals)):
self.pad_left += 1
# if there are non-finite values, may need to increase pad_left
if data.size != finite_vals.size:
neginf = self.sign != '-' or any(data[isinf(data)] < 0)
nanlen = len(_format_options['nanstr'])
inflen = len(_format_options['infstr']) + neginf
offset = self.pad_right + 1 # +1 for decimal pt
self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset)
def __call__(self, x):
if not np.isfinite(x):
with errstate(invalid='ignore'):
if np.isnan(x):
sign = '+' if self.sign == '+' else ''
ret = sign + _format_options['nanstr']
else: # isinf
sign = '-' if x < 0 else '+' if self.sign == '+' else ''
ret = sign + _format_options['infstr']
return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret
if self.exp_format:
return dragon4_scientific(x,
precision=self.precision,
unique=self.unique,
trim=self.trim,
sign=self.sign == '+',
pad_left=self.pad_left,
exp_digits=self.exp_size)
else:
return dragon4_positional(x,
precision=self.precision,
unique=self.unique,
fractional=True,
trim=self.trim,
sign=self.sign == '+',
pad_left=self.pad_left,
pad_right=self.pad_right)
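# Rough illustration of the formatter above (added comment): for data such as [0.1, 100.]
# the first pass over the finite values records the widest integer and fractional parts, so
# subsequent calls pad every element to a common width (e.g. '  0.1' alongside '100. ') and
# the columns of the printed array line up.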
@set_module('numpy')
def format_float_scientific(x, precision=None, unique=True, trim='k',
sign=False, pad_left=None, exp_digits=None):
"""
Format a floating-point scalar as a decimal string in scientific notation.
Provides control over rounding, trimming and padding. Uses and assumes
IEEE unbiased rounding. Uses the "Dragon4" algorithm.
Parameters
----------
x : python float or numpy floating scalar
Value to format.
precision : non-negative integer or None, optional
Maximum number of digits to print. May be None if `unique` is
`True`, but must be an integer if unique is `False`.
unique : boolean, optional
If `True`, use a digit-generation strategy which gives the shortest
representation which uniquely identifies the floating-point number from
other values of the same type, by judicious rounding. If `precision`
was omitted, print all necessary digits, otherwise digit generation is
cut off after `precision` digits and the remaining value is rounded.
If `False`, digits are generated as if printing an infinite-precision
value and stopping after `precision` digits, rounding the remaining
value.
trim : one of 'k', '.', '0', '-', optional
Controls post-processing trimming of trailing digits, as follows:
* 'k' : keep trailing zeros, keep decimal point (no trimming)
* '.' : trim all trailing zeros, leave decimal point
* '0' : trim all but the zero before the decimal point. Insert the
zero if it is missing.
* '-' : trim trailing zeros and any trailing decimal point
sign : boolean, optional
Whether to show the sign for positive values.
pad_left : non-negative integer, optional
Pad the left side of the string with whitespace until at least that
many characters are to the left of the decimal point.
exp_digits : non-negative integer, optional
Pad the exponent with zeros until it contains at least this many digits.
If omitted, the exponent will be at least 2 digits.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_positional
Examples
--------
>>> np.format_float_scientific(np.float32(np.pi))
'3.1415927e+00'
>>> s = np.float32(1.23e24)
>>> np.format_float_scientific(s, unique=False, precision=15)
'1.230000071797338e+24'
>>> np.format_float_scientific(s, exp_digits=4)
'1.23e+0024'
"""
precision = _none_or_positive_arg(precision, 'precision')
pad_left = _none_or_positive_arg(pad_left, 'pad_left')
exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
return dragon4_scientific(x, precision=precision, unique=unique,
trim=trim, sign=sign, pad_left=pad_left,
exp_digits=exp_digits)
@set_module('numpy')
def format_float_positional(x, precision=None, unique=True,
fractional=True, trim='k', sign=False,
pad_left=None, pad_right=None):
"""
Format a floating-point scalar as a decimal string in positional notation.
Provides control over rounding, trimming and padding. Uses and assumes
IEEE unbiased rounding. Uses the "Dragon4" algorithm.
Parameters
----------
x : python float or numpy floating scalar
Value to format.
precision : non-negative integer or None, optional
Maximum number of digits to print. May be None if `unique` is
`True`, but must be an integer if unique is `False`.
unique : boolean, optional
If `True`, use a digit-generation strategy which gives the shortest
representation which uniquely identifies the floating-point number from
other values of the same type, by judicious rounding. If `precision`
was omitted, print out all necessary digits, otherwise digit generation
is cut off after `precision` digits and the remaining value is rounded.
If `False`, digits are generated as if printing an infinite-precision
value and stopping after `precision` digits, rounding the remaining
value.
fractional : boolean, optional
If `True`, the cutoff of `precision` digits refers to the total number
of digits after the decimal point, including leading zeros.
If `False`, `precision` refers to the total number of significant
digits, before or after the decimal point, ignoring leading zeros.
trim : one of 'k', '.', '0', '-', optional
Controls post-processing trimming of trailing digits, as follows:
* 'k' : keep trailing zeros, keep decimal point (no trimming)
* '.' : trim all trailing zeros, leave decimal point
* '0' : trim all but the zero before the decimal point. Insert the
zero if it is missing.
* '-' : trim trailing zeros and any trailing decimal point
sign : boolean, optional
Whether to show the sign for positive values.
pad_left : non-negative integer, optional
Pad the left side of the string with whitespace until at least that
many characters are to the left of the decimal point.
pad_right : non-negative integer, optional
Pad the right side of the string with whitespace until at least that
many characters are to the right of the decimal point.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
Examples
--------
>>> np.format_float_positional(np.float32(np.pi))
'3.1415927'
>>> np.format_float_positional(np.float16(np.pi))
'3.14'
>>> np.format_float_positional(np.float16(0.3))
'0.3'
>>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
'0.3000488281'
"""
precision = _none_or_positive_arg(precision, 'precision')
pad_left = _none_or_positive_arg(pad_left, 'pad_left')
pad_right = _none_or_positive_arg(pad_right, 'pad_right')
return dragon4_positional(x, precision=precision, unique=unique,
fractional=fractional, trim=trim,
sign=sign, pad_left=pad_left,
pad_right=pad_right)
class IntegerFormat(object):
def __init__(self, data):
if data.size > 0:
max_str_len = max(len(str(np.max(data))),
len(str(np.min(data))))
else:
max_str_len = 0
self.format = '%{}d'.format(max_str_len)
def __call__(self, x):
return self.format % x
class BoolFormat(object):
def __init__(self, data, **kwargs):
# add an extra space so " True" and "False" have the same length and
# array elements align nicely when printed, except in 0d arrays
self.truestr = ' True' if data.shape != () else 'True'
def __call__(self, x):
return self.truestr if x else "False"
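# Illustrative aside (not part of the original module): the extra leading space
# in ' True' is what keeps boolean elements column-aligned, e.g. (doctest-style,
# assuming default print options):
# >>> np.array([True, False])
# array([ True, False])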
class ComplexFloatingFormat(object):
""" Formatter for subtypes of np.complexfloating """
def __init__(self, x, precision, floatmode, suppress_small,
sign=False, **kwarg):
# for backcompatibility, accept bools
if isinstance(sign, bool):
sign = '+' if sign else '-'
floatmode_real = floatmode_imag = floatmode
if kwarg.get('legacy', False) == '1.13':
floatmode_real = 'maxprec_equal'
floatmode_imag = 'maxprec'
self.real_format = FloatingFormat(x.real, precision, floatmode_real,
suppress_small, sign=sign, **kwarg)
self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag,
suppress_small, sign='+', **kwarg)
def __call__(self, x):
r = self.real_format(x.real)
i = self.imag_format(x.imag)
# add the 'j' before the terminal whitespace in i
sp = len(i.rstrip())
i = i[:sp] + 'j' + i[sp:]
return r + i
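# Illustrative aside (not part of the original module): the real part follows the
# requested sign mode while the imaginary part is always explicitly signed, and
# the 'j' is attached before any padding whitespace, e.g. (doctest-style):
# >>> np.array([1+2j, 3-4j])
# array([1.+2.j, 3.-4.j])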
class _TimelikeFormat(object):
def __init__(self, data):
non_nat = data[~isnat(data)]
if len(non_nat) > 0:
# Max str length of non-NaT elements
max_str_len = max(len(self._format_non_nat(np.max(non_nat))),
len(self._format_non_nat(np.min(non_nat))))
else:
max_str_len = 0
if len(non_nat) < data.size:
# data contains a NaT
max_str_len = max(max_str_len, 5)
self._format = '%{}s'.format(max_str_len)
self._nat = "'NaT'".rjust(max_str_len)
def _format_non_nat(self, x):
# override in subclass
raise NotImplementedError
def __call__(self, x):
if isnat(x):
return self._nat
else:
return self._format % self._format_non_nat(x)
class DatetimeFormat(_TimelikeFormat):
def __init__(self, x, unit=None, timezone=None, casting='same_kind',
legacy=False):
# Get the unit from the dtype
if unit is None:
if x.dtype.kind == 'M':
unit = datetime_data(x.dtype)[0]
else:
unit = 's'
if timezone is None:
timezone = 'naive'
self.timezone = timezone
self.unit = unit
self.casting = casting
self.legacy = legacy
# must be called after the above are configured
super(DatetimeFormat, self).__init__(x)
def __call__(self, x):
if self.legacy == '1.13':
return self._format_non_nat(x)
return super(DatetimeFormat, self).__call__(x)
def _format_non_nat(self, x):
return "'%s'" % datetime_as_string(x,
unit=self.unit,
timezone=self.timezone,
casting=self.casting)
class TimedeltaFormat(_TimelikeFormat):
def _format_non_nat(self, x):
return str(x.astype('i8'))
class SubArrayFormat(object):
def __init__(self, format_function):
self.format_function = format_function
def __call__(self, arr):
if arr.ndim <= 1:
return "[" + ", ".join(self.format_function(a) for a in arr) + "]"
return "[" + ", ".join(self.__call__(a) for a in arr) + "]"
class StructuredVoidFormat(object):
"""
Formatter for structured np.void objects.
This does not work on structured alias types like np.dtype(('i4', 'i2,i2')),
as alias scalars lose their field information, and the implementation
relies upon np.void.__getitem__.
"""
def __init__(self, format_functions):
self.format_functions = format_functions
@classmethod
def from_data(cls, data, **options):
"""
This is a second way to initialize StructuredVoidFormat, using the raw data
as input. Added to avoid changing the signature of __init__.
"""
format_functions = []
for field_name in data.dtype.names:
format_function = _get_format_function(data[field_name], **options)
if data.dtype[field_name].shape != ():
format_function = SubArrayFormat(format_function)
format_functions.append(format_function)
return cls(format_functions)
def __call__(self, x):
str_fields = [
format_function(field)
for field, format_function in zip(x, self.format_functions)
]
if len(str_fields) == 1:
return "({},)".format(str_fields[0])
else:
return "({})".format(", ".join(str_fields))
def _void_scalar_repr(x):
"""
Implements the repr for structured-void scalars. It is called from the
scalartypes.c.src code, and is placed here because it uses the elementwise
formatters defined above.
"""
return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
_typelessdata = [int_, float_, complex_, bool_]
if issubclass(intc, int):
_typelessdata.append(intc)
if issubclass(longlong, int):
_typelessdata.append(longlong)
def dtype_is_implied(dtype):
"""
Determine if the given dtype is implied by the representation of its values.
Parameters
----------
dtype : dtype
Data type
Returns
-------
implied : bool
True if the dtype is implied by the representation of its values.
Examples
--------
>>> np.core.arrayprint.dtype_is_implied(int)
True
>>> np.array([1, 2, 3], int)
array([1, 2, 3])
>>> np.core.arrayprint.dtype_is_implied(np.int8)
False
>>> np.array([1, 2, 3], np.int8)
array([1, 2, 3], dtype=int8)
"""
dtype = np.dtype(dtype)
if _format_options['legacy'] == '1.13' and dtype.type == bool_:
return False
# not just void types can be structured, and names are not part of the repr
if dtype.names is not None:
return False
return dtype.type in _typelessdata
def dtype_short_repr(dtype):
"""
Convert a dtype to a short form which evaluates to the same dtype.
The intent is roughly that the following holds
>>> from numpy import *
>>> dt = np.int64([1, 2]).dtype
>>> assert eval(dtype_short_repr(dt)) == dt
"""
if dtype.names is not None:
# structured dtypes give a list or tuple repr
return str(dtype)
elif issubclass(dtype.type, flexible):
# handle these separately so they don't give garbage like str256
return "'%s'" % str(dtype)
typename = dtype.name
# quote typenames which can't be represented as python variable names
if typename and not (typename[0].isalpha() and typename.isalnum()):
typename = repr(typename)
return typename
def _array_repr_implementation(
arr, max_line_width=None, precision=None, suppress_small=None,
array2string=array2string):
"""Internal version of array_repr() that allows overriding array2string."""
if max_line_width is None:
max_line_width = _format_options['linewidth']
if type(arr) is not ndarray:
class_name = type(arr).__name__
else:
class_name = "array"
skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
prefix = class_name + "("
suffix = ")" if skipdtype else ","
if (_format_options['legacy'] == '1.13' and
arr.shape == () and not arr.dtype.names):
lst = repr(arr.item())
elif arr.size > 0 or arr.shape == (0,):
lst = array2string(arr, max_line_width, precision, suppress_small,
', ', prefix, suffix=suffix)
else: # show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(arr.shape),)
arr_str = prefix + lst + suffix
if skipdtype:
return arr_str
dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
# compute whether we should put dtype on a new line: Do so if adding the
# dtype would extend the last line past max_line_width.
# Note: This line gives the correct result even when rfind returns -1.
last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
spacer = " "
if _format_options['legacy'] == '1.13':
if issubclass(arr.dtype.type, flexible):
spacer = '\n' + ' '*len(class_name + "(")
elif last_line_len + len(dtype_str) + 1 > max_line_width:
spacer = '\n' + ' '*len(class_name + "(")
return arr_str + spacer + dtype_str
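# Illustrative aside (doctest-style, via the public wrapper defined below):
# >>> np.array_repr(np.array([1, 2, 3], np.int8))
# 'array([1, 2, 3], dtype=int8)'
# The spacer logic above only moves ", dtype=...)" onto its own line when the
# suffix would push the final line past max_line_width.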
def _array_repr_dispatcher(
arr, max_line_width=None, precision=None, suppress_small=None):
return (arr,)
@array_function_dispatch(_array_repr_dispatcher, module='numpy')
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
"""
Return the string representation of an array.
Parameters
----------
arr : ndarray
Input array.
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
Defaults to ``numpy.get_printoptions()['linewidth']``.
precision : int, optional
Floating point precision.
Defaults to ``numpy.get_printoptions()['precision']``.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
Defaults to ``numpy.get_printoptions()['suppress']``.
Returns
-------
string : str
The string representation of an array.
See Also
--------
array_str, array2string, set_printoptions
Examples
--------
>>> np.array_repr(np.array([1,2]))
'array([1, 2])'
>>> np.array_repr(np.ma.array([0.]))
'MaskedArray([0.])'
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
>>> x = np.array([1e-6, 4e-7, 2, 3])
>>> np.array_repr(x, precision=6, suppress_small=True)
'array([0.000001, 0. , 2. , 3. ])'
"""
return _array_repr_implementation(
arr, max_line_width, precision, suppress_small)
@_recursive_guard()
def _guarded_repr_or_str(v):
if isinstance(v, bytes):
return repr(v)
return str(v)
def _array_str_implementation(
a, max_line_width=None, precision=None, suppress_small=None,
array2string=array2string):
"""Internal version of array_str() that allows overriding array2string."""
if (_format_options['legacy'] == '1.13' and
a.shape == () and not a.dtype.names):
return str(a.item())
# the str of 0d arrays is a special case: It should appear like a scalar,
# so floats are not truncated by `precision`, and strings are not wrapped
# in quotes. So we return the str of the scalar value.
if a.shape == ():
# obtain a scalar and call str on it, avoiding problems for subclasses
# for which indexing with () returns a 0d instead of a scalar by using
# ndarray's getindex. Also guard against recursive 0d object arrays.
return _guarded_repr_or_str( | np.ndarray.__getitem__(a, ()) | numpy.ndarray.__getitem__ |
import numpy as np
import pandas as pd
import scipy.stats as stats
import os
class FDR_control:
def __init__(self):
print("__init__");
def kfilter(self, W, offset=1.0, q=0.05):
"""
Adaptive significance threshold with the knockoff filter
:param W: vector of knockoff statistics
:param offset: equal to one for strict false discovery rate control
:param q: nominal false discovery rate
        :return: a threshold value for which the estimated FDP is less than or equal to q
"""
t = np.insert( | np.abs(W[W!=0]) | numpy.abs |
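# Hedged aside (a minimal standalone sketch, not the truncated implementation
# above): the docstring describes the standard knockoff+ threshold, i.e. the
# smallest candidate t whose estimated FDP is at most q.
import numpy as np

def knockoff_threshold_sketch(W, offset=1.0, q=0.05):
    """Smallest t with (offset + #{W <= -t}) / max(1, #{W >= t}) <= q, else +inf."""
    W = np.asarray(W, dtype=float)
    # Candidate thresholds: zero plus the non-zero magnitudes of the statistics.
    candidates = np.sort(np.insert(np.abs(W[W != 0]), 0, 0))
    for t in candidates:
        fdp_hat = (offset + np.sum(W <= -t)) / max(1, np.sum(W >= t))
        if fdp_hat <= q:
            return t
    return np.inf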
import pandas as pd
import numpy as np
from numpy.linalg import inv
import math as m
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 25})
df = pd.read_csv('Dados90.csv')
VEmod=df['Vemod'][399]
VEfase=(df['Vefase'][399]*m.pi/180)
IEmod=df['Iemod'][399]
IEfase=(df['Iefase'][399]*m.pi/180)
VSmod=df['Vsmod'][399]
VSfase=(df['Vsfase'][399]*m.pi/180)
ISmod=df['Ismod'][399]
ISfase=(df['Isfase'][399]*m.pi/180)
Rex=5
Xex=48.8
Bex=3.371*(10**(-4))
n=5000
sigma = 0.04
thetamax = 8 * (10 ** (-3))
itmax = 4
alpha = 0.7
B = np.zeros(itmax + 1)
B[0] = alpha * Bex
R = np.zeros(itmax + 1)
R[0] = alpha * Rex
X = np.zeros(itmax + 1)
X[0] = alpha * Xex
np.random.seed(42)
w1=VEmod*np.ones(n)+VEmod*(sigma/3)*np.random.normal(0,1,n)
np.random.seed(41)
w2=VEfase+(thetamax/3)*np.random.normal(0,1,n)
np.random.seed(40)
w3=IEmod*np.ones(n)+IEmod*(sigma/3)*np.random.normal(0,1,n)
np.random.seed(39)
w4=IEfase+(thetamax/3)*np.random.normal(0,1,n)
np.random.seed(38)
w5=VSmod*np.ones(n)+VSmod*(sigma/3)*np.random.normal(0,1,n)
np.random.seed(37)
w6=VSfase+(thetamax/3)*np.random.normal(0,1,n)
np.random.seed(36)
w7=ISmod*np.ones(n)+ISmod*(sigma/3)*np.random.normal(0,1,n)
np.random.seed(35)
w8=ISfase+(thetamax/3)*np.random.normal(0,1,n)
realIe=np.ones(n)
imagVe=np.ones(n)
imagIe=np.ones(n)
realVe=np.ones(n)
realIs=np.ones(n)
imagVs=np.ones(n)
imagIs=np.ones(n)
realVs=np.ones(n)
for j in range(0,n):
realIe[j]=w3[j]*np.cos(w4[j])
imagVe[j]=w1[j]* | np.sin(w2[j]) | numpy.sin |
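# Hedged aside (a sketch, not part of the original script): the element-wise
# loop above converts magnitude/phase samples to rectangular form; a vectorized
# equivalent using the w1..w8 arrays already defined would be, e.g.:
#   realVe, imagVe = w1 * np.cos(w2), w1 * np.sin(w2)
#   realIe, imagIe = w3 * np.cos(w4), w3 * np.sin(w4)
#   realVs, imagVs = w5 * np.cos(w6), w5 * np.sin(w6)
#   realIs, imagIs = w7 * np.cos(w8), w7 * np.sin(w8)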
import matplotlib.pyplot as plt
import numpy as np
import os
import flopter.core.constants
import flopter.magnum.database
from flopter.magnum.magopter import Magopter
from flopter.core.lputils import MagnumProbes
import glob
import flopter.magnum.readfastadc as adc
from flopter.core import constants as c, normalise as nrm, fitters as f, lputils as lp
import flopter.magnum.database as mag
from scipy.interpolate import interp1d
import scipy.signal as sig
import concurrent.futures as cf
import pathlib as pth
import pandas as pd
# from tkinter.filedialog import askopenfilename
def main_magopter_analysis():
folders = ['2018-05-01_Leland/', '2018-05-02_Leland/', '2018-05-03_Leland/',
'2018-06-05_Leland/', '2018-06-06_Leland/', '2018-06-07_Leland/']
files = []
file_folders = []
for folder1 in folders:
os.chdir(Magopter.get_data_path() + folder1)
files.extend(glob.glob('*.adc'))
file_folders.extend([folder1] * len(glob.glob('*.adc')))
# files = [f.replace(' ', '_') for f in files]
files.sort()
# file = '2018-05-01_12h_55m_47s_TT_06550564404491814477.adc' # 8
# file = '2018-05-03_11h_31m_41s_TT_06551284859908422561.adc' # 82
files_of_interest = {
8: "First analysed",
82: "Higher Temp",
97: "Angular Sweep with different probes"
}
file_index = 82
# file = files[file_index]
file = files[-2]
ts_file = files[-1]
folder = file_folders[-2]
print(folder, file)
print(flopter.magnum.database.human_time_str(adc.get_magnumdb_timestamp(ts_file)))
print(ts_file)
magopter = Magopter(folder, ts_file)
# print(file, magopter.magnum_db.get_time_range(filename=file))
# plt.figure()
# plt.errorbar(magopter.ts_coords, magopter.ts_temp, yerr=magopter.ts_temp_d, label='Temperature')
# exit()
# length = len(magopter.t_file)
# for i in range(1, 20):
# split = int(length / i)
# plt.figure()
# plt.title('i = {}'.format(i))
# plt.log
# for j in range(i):
# plt.semilogy(magopter.t_file[j*split:j+1*split], label='j = {}'.format(j))
# plt.show()
dsr = 10
magopter.prepare(down_sampling_rate=dsr, plot_fl=True)
# magopter.trim(trim_end=0.82)
magopter.trim(trim_end=0.83)
fit_df_0, fit_df_1 = magopter.fit()
iv_data = fit_df_0.iloc[[125]]
plt.figure()
for iv_curve in magopter.iv_arrs[0]:
plt.plot(iv_curve.time, iv_curve.current)
plt.axvline(x=iv_data.index)
# Flush probe measurements
L_small = 3e-3 # m
a_small = 2e-3 # m
b_small = 3e-3 # m
g_small = 2e-3 # m
theta_f_small = np.radians(72)
L_large = 5e-3 # m
a_large = 4.5e-3 # m
b_large = 6e-3 # m
g_large = 1e-3 # m
theta_f_large = np.radians(73.3)
L_reg = 5e-3 # m
a_reg = 2e-3 # m
b_reg = 3.34e-3 # m
g_reg = 1e-3 # m
theta_f_reg = np.radians(75)
L_cyl = 4e-3 # m
g_cyl = 5e-4 # m
# T_e = 1.78 # eV
# n_e = 5.1e19 # m^-3
# fwhm = 14.3 # mm
# T_e = 0.67 # eV
# n_e = 2.3e19 # m^-3
# fwhm = 16 # mm
# T_e = 1.68
# n_e = 1.93e19
# fwhm = 16.8
# T_e = 0.75
# n_e = 1.3e20
# fwhm = 16.8
# T_e = 0.76
# n_e = 1.0e20
# fwhm = 16.8
T_e = 1.61
n_e = 1.41e20
fwhm = 12.4
deg_freedom = 3
gamma_i = (deg_freedom + 2) / 2
d_perp = 3e-4 # m
theta_p = np.radians(10)
theta_perp = np.radians(10)
probe_s = lp.AngledTipProbe(a_small, b_small, L_small, g_small, d_perp, theta_f_small, theta_p)
probe_l = lp.AngledTipProbe(a_large, b_large, L_large, g_large, d_perp, theta_f_large, theta_p)
probe_r = lp.AngledTipProbe(a_reg, b_reg, L_reg, g_reg, d_perp, theta_f_reg, theta_p)
probe_c = lp.FlushCylindricalProbe(L_cyl / 2, g_cyl, d_perp)
A_coll_s = lp.calc_probe_collection_area(a_small, b_small, L_small, g_small, d_perp, theta_perp, theta_p,
print_fl=False)
A_coll_l = lp.calc_probe_collection_area(a_large, b_large, L_large, g_large, d_perp, theta_perp, theta_p,
print_fl=False)
A_coll_r = lp.calc_probe_collection_area(a_reg, b_reg, L_reg, g_reg, d_perp, theta_perp, theta_p, print_fl=False)
A_coll_c = probe_c.get_collection_area(theta_perp)
print('Small area: {}, Large area: {}, Regular area: {}, Cylindrical area: {}'.format(A_coll_s, A_coll_l, A_coll_r,
A_coll_c))
# Plotting analytical IV over the top of the raw IVs
print(fit_df_0)
plt.figure()
# for iv_curve in magopter.iv_arr_coax_0:
# plt.plot(iv_curve.voltage, iv_curve.current)
plt.plot(iv_data[c.RAW_X].tolist()[0], iv_data[c.RAW_Y].tolist()[0], 'x', label='Raw IV')
plt.plot(iv_data[c.RAW_X].tolist()[0], iv_data[c.FIT_Y].tolist()[0], label='Fit IV')
iv_v_f = -10
I_s = lp.analytical_iv_curve(iv_data[c.RAW_X].tolist()[0], iv_v_f, T_e, n_e, theta_perp, A_coll_s, L=L_small,
g=g_small)
I_c = lp.analytical_iv_curve(iv_data[c.RAW_X].tolist()[0], iv_v_f, T_e, n_e, theta_perp, A_coll_c, L=L_small,
g=g_small)
plt.plot(iv_data[c.RAW_X].tolist()[0], I_s, label='Analytical', linestyle='dashed', linewidth=1, color='r')
# plt.plot(iv_data[c.RAW_X].tolist()[0], I_c, label='Analytical (c)', linestyle='dashed', linewidth=1, color='g')
plt.legend()
plt.title('Comparison of analytical to measured IV curves for the small area probe')
plt.xlabel('Voltage (V)')
plt.ylabel('Current (A)')
# A_coll_s = calc_probe_collection_A_alt(a_small, b_small, L_small, theta_perp, theta_p)
# A_coll_l = calc_probe_collection_A_alt(a_large, b_large, L_large, theta_perp, theta_p)
# A_coll_l = (26.25 * 1e-6) * np.sin(theta_perp + theta_p)
# print('Small area: {}, Large area: {}'.format(A_coll_s, A_coll_l))
c_s = np.sqrt((flopter.core.constants.ELEM_CHARGE * (T_e + gamma_i * T_e)) / flopter.core.constants.PROTON_MASS)
n_e_0 = fit_df_0[c.ION_SAT] / (flopter.core.constants.ELEM_CHARGE * c_s * A_coll_s)
n_e_1 = fit_df_1[c.ION_SAT] / (flopter.core.constants.ELEM_CHARGE * c_s * A_coll_c)
I_sat_0 = c_s * n_e * flopter.core.constants.ELEM_CHARGE * A_coll_s
I_sat_1 = c_s * n_e * flopter.core.constants.ELEM_CHARGE * A_coll_c
J_sat_0 = fit_df_0[c.ION_SAT] / A_coll_s
J_sat_1 = fit_df_1[c.ION_SAT] / A_coll_c
plt.figure()
plt.subplot(221)
plt.title('Electron Temperature Measurements')
plt.xlabel('Time (s)')
plt.ylabel(r'$T_e$ (eV)')
plt.errorbar(fit_df_0.index, c.ELEC_TEMP, yerr=c.ERROR_STRING.format(c.ELEC_TEMP), data=fit_df_0, fmt='x',
label='Half area')
plt.errorbar(fit_df_1.index, c.ELEC_TEMP, yerr=c.ERROR_STRING.format(c.ELEC_TEMP), data=fit_df_1, fmt='x',
label='Cylinder area')
plt.axhline(y=T_e, linestyle='dashed', linewidth=1, color='r', label='TS')
plt.legend()
plt.subplot(222)
plt.title('Ion Saturation Current Measurements')
plt.xlabel('Time (s)')
plt.ylabel(r'$I^+_{sat}$ (eV)')
plt.errorbar(fit_df_0.index, c.ION_SAT, yerr=c.ERROR_STRING.format(c.ION_SAT), data=fit_df_0, label='Half area',
fmt='x')
plt.errorbar(fit_df_1.index, c.ION_SAT, yerr=c.ERROR_STRING.format(c.ION_SAT), data=fit_df_1, label='Cylinder area',
fmt='x')
# for arc in magopter.arcs:
# plt.axvline(x=arc, linestyle='dashed', linewidth=1, color='r')
plt.axhline(y=I_sat_0, linestyle='dashed', linewidth=1, color='r', label='Expected I_sat (s)')
plt.legend()
# plt.figure()
# plt.subplot(223)
# plt.title('Current Density Measurements')
# plt.xlabel('Time (s)')
# plt.ylabel(r'$J_{sat}$ (Am$^{-2}$)')
# plt.plot(fit_df_0.index, J_sat_0, label='Half area')
# plt.plot(fit_df_1.index, J_sat_1, label='Cylinder area')
# for arc in magopter.arcs:
# plt.axvline(x=arc, linestyle='dashed', linewidth=1, color='r')
# plt.legend()
# plt.figure()
plt.subplot(223)
plt.title('Electron Density Measurements')
plt.xlabel('Time (s)')
plt.ylabel(r'$n_e$ (m$^{-3}$)')
plt.plot(fit_df_0.index, n_e_0, 'x', label='Half Area')
plt.plot(fit_df_1.index, n_e_1, 'x', label='Cylinder Area')
plt.axhline(y=n_e, linestyle='dashed', linewidth=1, color='r', label='TS')
plt.legend()
a_s = lp.calc_sheath_expansion_param(T_e, n_e, L_small, g_small, theta_perp)
a_c = lp.calc_sheath_expansion_param(T_e, n_e, L_cyl, g_cyl, theta_perp)
print(a_s, a_c)
plt.subplot(224)
plt.title('Sheath Expansion Coefficient Measurements')
plt.xlabel('Time (s)')
plt.ylabel(r'$a$')
plt.errorbar(fit_df_0.index, c.SHEATH_EXP, yerr=c.ERROR_STRING.format(c.SHEATH_EXP), data=fit_df_0, fmt='x',
label='Half Area')
plt.errorbar(fit_df_1.index, c.SHEATH_EXP, yerr=c.ERROR_STRING.format(c.SHEATH_EXP), data=fit_df_1, fmt='x',
label='Cylinder Area')
plt.axhline(y=a_s, linestyle='dashed', linewidth=1, color='r', label='Expected - small')
plt.axhline(y=a_c, linestyle='dashed', linewidth=1, color='b', label='Expected - cyl')
plt.legend()
plt.show()
def integrated_analysis(probe_coax_0, probe_coax_1, folder, file, ts_file=None):
magopter = Magopter(folder, file, ts_filename=ts_file)
dsr = 1
magopter.prepare(down_sampling_rate=dsr, roi_b_plasma=True, crit_freq=4000, crit_ampl=None)
# magopter.trim(trim_end=0.83)
fit_df_0, fit_df_1 = magopter.fit()
theta_perp = np.radians(10)
A_coll_0 = probe_coax_0.get_collection_area(theta_perp)
A_coll_1 = probe_coax_1.get_collection_area(theta_perp)
if magopter.ts_temp is not None:
temps = [ | np.max(temp) | numpy.max |
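# Hedged aside (a standalone sketch with purely illustrative numbers, not part
# of the original analysis): the density estimates above follow the ion-flux
# relation n_e = I_sat / (e * c_s * A_coll) with c_s the ion sound speed.
import numpy as np

ELEM_CHARGE = 1.602e-19   # C
PROTON_MASS = 1.673e-27   # kg
T_e = 1.61                # eV, value quoted in the script above
gamma_i = 2.5             # (deg_freedom + 2) / 2 with deg_freedom = 3
A_coll = 1.0e-6           # m^2, illustrative collection area
I_sat = 0.5               # A, illustrative ion saturation current

c_s = np.sqrt(ELEM_CHARGE * (T_e + gamma_i * T_e) / PROTON_MASS)
n_e = I_sat / (ELEM_CHARGE * c_s * A_coll)
print('c_s = {:.3g} m/s, n_e = {:.3g} m^-3'.format(c_s, n_e))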
"""
Created on 16/11/2012
@author: victor
"""
from __future__ import print_function
import unittest
import pyRMSD.RMSDCalculator
import numpy
class TestRMSDCalculators(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.expected_rmsd = {0:[ 0.60179184, 0.70814575, 0.88785042, 0.92862096, 0.69024252, 0.59267699, 0.66596155, 0.81180133, 0.87438831, 1.00465129],
1:[ 0.61473279, 0.82416178, 0.96955624, 0.71842781, 0.5359385, 0.68621908, 0.90540226, 0.83185205, 0.96145774],
2:[ 1.02156795, 1.16059055, 0.80778577, 0.72752425, 0.80478222, 0.98594799, 1.04869932, 1.01149253],
3:[ 0.69628994, 1.04059251, 0.77859792, 0.74962628, 0.73856698, 0.70444404, 0.92168545]}
cls.expected_serial_matrix = [0.60179184,0.70814575,0.88785042,0.92862096,0.69024252,0.59267699,
0.66596155,0.81180133,0.87438831,1.00465129,0.61473279,0.82416178,
0.96955624,0.71842781,0.5359385, 0.68621908,0.90540226,0.83185205,
0.96145774,1.02156795,1.16059055,0.80778577,0.72752425,0.80478222,
0.98594799,1.04869932,1.01149253,0.69628994,1.04059251,0.77859792,
0.74962628,0.73856698,0.70444404,0.92168545,1.08217543,0.86196576,
0.89731473,0.96848922,0.84721509,1.13748551,0.64892912,0.87248355,
1.00029474,1.01622641,1.10694473,0.68347196,0.83819283,0.7589582,
0.93694602,0.76944618,0.82288799,0.91196003,0.75938856,0.68278426,
0.76302383]
def setUp(self):
# print("In method", self._testMethodName)
# Each test gets a fresh coordinates set, as they will modify the coordinates
# QCP is specially sensitive to variations in input coordinates and results can vary
self.coordsets_mini = numpy.load("data/coordsets_mini.npy")
self.coordsets = numpy.load("data/coordsets.npy")
self.number_of_conformations = self.coordsets.shape[0]
self.number_of_atoms = self.coordsets.shape[1]
def test_serial_omp_pairwise(self):
"""
Calculates all matrix elements one by one with the pairwise operation.
"""
expected_rmsd_data = [0.22677106513739653, 0.44598234794295144, 0.37817804816455303]
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QTRFIT_OMP_CALCULATOR",self.coordsets_mini)
rmsd = []
for i in range(len(self.coordsets_mini)):
for j in range(i+1, len(self.coordsets_mini)):
rmsd.append(calculator.pairwise(i, j))
numpy.testing.assert_array_almost_equal(rmsd, expected_rmsd_data,8)
@unittest.skipIf(not "QCP_CUDA_CALCULATOR" in pyRMSD.RMSDCalculator.availableCalculators(),"CUDA calculator not available")
def test_theobald_cuda_pairwise(self):
"""
Calculates all matrix elements one by one with the pairwise operation.
"""
expected_rmsd_data = [0.22677106513739653, 0.44598234794295144, 0.37817804816455303]
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QCP_CUDA_CALCULATOR",self.coordsets_mini)
rmsd = []
for i in range(len(self.coordsets_mini)):
for j in range(i+1, len(self.coordsets_mini)):
rmsd.append(calculator.pairwise(i, j))
numpy.testing.assert_array_almost_equal(rmsd, expected_rmsd_data,4)
def test_one_vs_others_serial_omp(self):
"""
Calculates the reference vs the others with the OpenMP functions.
"""
expected = [0.88785042, 0.82416178, 1.02156795, 0.69628994, 1.04059251, 0.77859792, 0.74962628, 0.73856698, 0.70444404, 0.92168545]
rmsd = pyRMSD.RMSDCalculator.RMSDCalculator("QTRFIT_OMP_CALCULATOR", self.coordsets).oneVsTheOthers(3)
numpy.testing.assert_array_almost_equal(rmsd, expected,8)
def test_kabsch_serial(self):
"""
Calculates the whole pairwise matrix by calculating each of the rows of the matrix.
"""
for conf_num in self.expected_rmsd:
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("KABSCH_SERIAL_CALCULATOR",self.coordsets)
rmsd = calculator.oneVsFollowing(conf_num)
numpy.testing.assert_array_almost_equal(rmsd, self.expected_rmsd[conf_num],8)
def test_kabsch_OpenMP(self):
"""
Calculates the whole pairwise matrix by calculating each of the rows of the matrix.
"""
for conf_num in self.expected_rmsd:
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("KABSCH_OMP_CALCULATOR", self.coordsets)
rmsd = calculator.oneVsFollowing(conf_num)
numpy.testing.assert_array_almost_equal(rmsd, self.expected_rmsd[conf_num],8)
def test_serial(self):
"""
Calculates the whole pairwise matrix by calculating each of the rows of the matrix.
"""
for conf_num in self.expected_rmsd:
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QTRFIT_SERIAL_CALCULATOR", self.coordsets )
rmsd = calculator.oneVsFollowing(conf_num)
numpy.testing.assert_array_almost_equal(rmsd, self.expected_rmsd[conf_num],8)
def test_OpenMP(self):
"""
Calculates the whole pairwise matrix by calculating each of the rows of the matrix.
"""
for conf_num in self.expected_rmsd:
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QTRFIT_OMP_CALCULATOR", self.coordsets)
rmsd = calculator.oneVsFollowing(conf_num)
numpy.testing.assert_array_almost_equal(rmsd, self.expected_rmsd[conf_num],8)
def test_theobald_serial(self):
"""
Calculates the whole pairwise matrix by calculating each of the rows of the matrix.
"""
for conf_num in self.expected_rmsd:
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QCP_SERIAL_CALCULATOR", self.coordsets)
rmsd = calculator.oneVsFollowing(conf_num)
numpy.testing.assert_array_almost_equal(rmsd, self.expected_rmsd[conf_num],8)
def test_theobald_OpenMP(self):
"""
Calculates the whole pairwise matrix by calculating each of the rows of the matrix.
"""
for conf_num in self.expected_rmsd:
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QCP_OMP_CALCULATOR", self.coordsets)
rmsd = calculator.oneVsFollowing(conf_num)
numpy.testing.assert_array_almost_equal(rmsd, self.expected_rmsd[conf_num],8)
@unittest.skipIf(not "QCP_CUDA_CALCULATOR" in pyRMSD.RMSDCalculator.availableCalculators(),"CUDA calculator not available")
def test_theobald_CUDA(self):
"""
Calculates the whole pairwise matrix by calculating each of the rows of the matrix.
"""
for conf_num in self.expected_rmsd:
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QCP_CUDA_CALCULATOR", self.coordsets)
rmsd = calculator.oneVsFollowing(conf_num)
numpy.testing.assert_array_almost_equal(rmsd, self.expected_rmsd[conf_num],4)
@unittest.skipIf(not "QCP_CUDA_MEM_CALCULATOR" in pyRMSD.RMSDCalculator.availableCalculators(),"CUDA calculator not available")
def test_theobald_experimental_CUDA(self):
"""
Calculates the whole pairwise matrix by calculating each of the rows of the matrix.
"""
for conf_num in self.expected_rmsd:
calculator = pyRMSD.RMSDCalculator.RMSDCalculator( "QCP_CUDA_MEM_CALCULATOR", self.coordsets)
rmsd = calculator.oneVsFollowing(conf_num)
numpy.testing.assert_array_almost_equal(rmsd, self.expected_rmsd[conf_num],4)
def test_serial_matrix_generation(self):
"""
Calculates the whole matrix.
"""
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QTRFIT_SERIAL_CALCULATOR", self.coordsets)
rmsd = calculator.pairwiseRMSDMatrix()
numpy.testing.assert_array_almost_equal(rmsd, self.expected_serial_matrix,8)
def test_openmp_matrix_generation(self):
"""
Calculates the whole matrix.
"""
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QTRFIT_OMP_CALCULATOR", self.coordsets)
rmsd = calculator.pairwiseRMSDMatrix()
numpy.testing.assert_array_almost_equal(rmsd, self.expected_serial_matrix,8)
def test_theobald_serial_matrix_generation(self):
"""
Calculates the whole matrix.
"""
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QCP_SERIAL_CALCULATOR", self.coordsets)
rmsd = calculator.pairwiseRMSDMatrix()
numpy.testing.assert_array_almost_equal(rmsd, self.expected_serial_matrix,8)
def test_theobald_OpenMP_matrix_generation(self):
"""
Calculates the whole matrix.
"""
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QCP_OMP_CALCULATOR", self.coordsets)
rmsd = calculator.pairwiseRMSDMatrix()
numpy.testing.assert_array_almost_equal(rmsd, self.expected_serial_matrix,8)
@unittest.skipIf(not "QCP_CUDA_CALCULATOR" in pyRMSD.RMSDCalculator.availableCalculators(),"CUDA calculator not available")
def test_theobald_cuda_matrix_generation(self):
"""
Calculates the whole matrix.
"""
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QCP_CUDA_CALCULATOR", self.coordsets)
rmsd = calculator.pairwiseRMSDMatrix()
numpy.testing.assert_array_almost_equal(rmsd, self.expected_serial_matrix, 4)
@unittest.skipIf(not "QCP_CUDA_MEM_CALCULATOR" in pyRMSD.RMSDCalculator.availableCalculators(),"CUDA calculator not available")
def test_theobald_cuda_experimental_matrix_generation(self):
"""
Calculates the whole matrix.
"""
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QCP_CUDA_MEM_CALCULATOR", self.coordsets)
rmsd = calculator.pairwiseRMSDMatrix()
numpy.testing.assert_array_almost_equal(rmsd, self.expected_serial_matrix, 4)
def test_coordinates_change(self):
"""
Tests if
"""
number_of_coordsets = 5;
number_of_atoms = 3239;
number_of_CAs = 224;
not_aligned_CA = numpy.reshape(numpy.loadtxt("data/ligand_mini_CAs"), (number_of_coordsets,number_of_CAs,3))
reference_copy = numpy.copy(not_aligned_CA[0])
target_copy = numpy.copy(not_aligned_CA[4])
calculator = pyRMSD.RMSDCalculator.RMSDCalculator("QTRFIT_OMP_CALCULATOR", not_aligned_CA)
rmsd = calculator.pairwise(0, 4, get_superposed_coordinates=False)
self.assertAlmostEqual(rmsd, 0.3383320758562839, 12)
# After the calculations, the target and reference coordinates have not changed, because
# a copy was performed ...
numpy.testing.assert_almost_equal(not_aligned_CA[0],reference_copy,16)
numpy.testing.assert_almost_equal(not_aligned_CA[4],target_copy,16)
# Helper functions
def read_coords_file(self, path):
file_handler = open(path, "r")
data = []
for line in file_handler:
if line.strip() != "":
parts = line.split()
data.append([float(x) for x in parts])
file_handler.close()
float_shape = data.pop(0)
shape = (int(float_shape[0]),int(float_shape[1]),int(float_shape[2]))
return | numpy.array(data) | numpy.array |
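# Hedged aside (a minimal usage sketch mirroring the calls exercised above, not
# new API): a typical workflow loads an (n_conformations, n_atoms, 3) array and
# asks a calculator for the condensed pairwise matrix.
#   import numpy, pyRMSD.RMSDCalculator
#   coordsets = numpy.load("data/coordsets.npy")
#   calc = pyRMSD.RMSDCalculator.RMSDCalculator("QCP_OMP_CALCULATOR", coordsets)
#   condensed_matrix = calc.pairwiseRMSDMatrix()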
# -*- coding: utf-8 -*-
"""
@author: ruess
"""
from __future__ import division
from __future__ import absolute_import
from builtins import object
import pytest
from masci_tools.io.common_functions import (interpolate_dos, get_alat_from_bravais,
search_string, angles_to_vec,
vec_to_angles, get_version_info,
get_corestates_from_potential,
get_highest_core_state,
get_ef_from_potfile, open_general,
convert_to_pystd)
class Test_common_functions(object):
"""
Tests for the common functions from tools.common_functions
"""
def test_open_general(self):
path = '../tests/files/kkr/kkr_run_slab_nosoc/out_kkr'
f = open_general(path)
l1 = len(f.readlines())
f = open_general(f)
l2 = len(f.readlines())
assert l1==l2
assert l2>0
def test_interpolate_dos(self):
from numpy import load, loadtxt, shape
d0 = '../tests/files/interpol/complex.dos'
ef, dos, dos_int = interpolate_dos(d0, return_original=True)
assert ef == 0.5256
dos_ref = loadtxt('../tests/files/interpol/new3.dos')
assert (dos_int.reshape(shape(dos_ref))-dos_ref).max()<10**-4
assert (dos == load('../tests/files/interpol/ref_dos.npy')).all()
def test_interpolate_dos_filehandle(self):
from numpy import load, loadtxt, shape
d0 = open('../tests/files/interpol/complex.dos')
d0 = '../tests/files/interpol/complex.dos'
ef, dos, dos_int = interpolate_dos(d0, return_original=True)
assert ef == 0.5256
dos_ref = loadtxt('../tests/files/interpol/new3.dos')
assert (dos_int.reshape(shape(dos_ref))-dos_ref).max()<10**-4
assert (dos == load('../tests/files/interpol/ref_dos.npy')).all()
def test_get_alat_from_bravais(self):
from numpy import array, sqrt
bravais = | array([[0.0, 0.5, 0.5], [0.5, 0.0, 0.5], [0.5, 0.5, 0.0]]) | numpy.array |
"""Return elements chosen from `x` or `y` depending on `condition`."""
from __future__ import annotations
import numpy
import numpy.typing
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.where)
def where(condition: numpy.typing.ArrayLike, *args: PolyLike) -> ndpoly:
"""
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
preferred, as it behaves correctly for subclasses. The rest of this
        documentation covers only the case where all three arguments are
provided.
Args:
condition:
Where True, yield `x`, otherwise yield `y`.
x:
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
Returns:
An array with elements from `x` where `condition` is True,
and elements from `y` elsewhere.
Examples:
>>> poly = numpoly.variable()*numpy.arange(4)
>>> poly
polynomial([0, q0, 2*q0, 3*q0])
>>> numpoly.where([1, 0, 1, 0], 7, 2*poly)
polynomial([7, 2*q0, 7, 6*q0])
>>> numpoly.where(poly, 2*poly, 4)
polynomial([4, 2*q0, 4*q0, 6*q0])
>>> numpoly.where(poly)
(array([1, 2, 3]),)
"""
if isinstance(condition, numpoly.ndpoly):
condition = numpy.any(numpy.asarray(
condition.coefficients), 0).astype(bool)
if not args:
return numpy.where(condition)
poly1, poly2 = numpoly.align_polynomials(*args)
coefficients = [numpy.where(condition, x1, x2)
for x1, x2 in zip(poly1.coefficients, poly2.coefficients)]
dtype = | numpy.result_type(poly1.dtype, poly2.dtype) | numpy.result_type |
#!/usr/bin/env python
"""A class for handling 5C analysis."""
import os
import sys
from math import log
import numpy
from scipy.stats import linregress
import h5py
from scipy.optimize import fmin_l_bfgs_b as bfgs
import libraries._fivec_binning as _binning
import libraries._fivec_optimize as _optimize
import fivec_binning
import plotting
class FiveC(object):
"""
This is the class for handling 5C analysis.
This class relies on :class:`Fragment <hifive.fragment.Fragment>` and :class:`FiveCData <hifive.fivec_data.FiveCData>` for genomic position and interaction count data. Use this class to perform filtering of fragments based on coverage, model fragment bias and distance dependence, and downstream analysis and manipulation. This includes binning of data, plotting of data, and statistical analysis.
.. note::
This class is also available as hifive.FiveC
When initialized, this class creates an h5dict in which to store all data associated with this object.
:param filename: The file name of the h5dict. This should end with the suffix '.hdf5'
:type filename: str.
:param mode: The mode to open the h5dict with. This should be 'w' for creating or overwriting an h5dict with name given in filename.
:type mode: str.
:param silent: Indicates whether to print information about function execution for this object.
:type silent: bool.
:returns: :class:`FiveC <hifive.fivec.FiveC>` class object.
:attributes: * **file** (*str.*) - A string containing the name of the file passed during object creation for saving the object to.
* **silent** (*bool.*) - A boolean indicating whether to suppress all of the output messages.
* **history** (*str.*) - A string containing all of the commands executed on this object and their outcome.
* **normalization** (*str.*) - A string stating which type of normalization has been performed on this object. This starts with the value 'none'.
In addition, many other attributes are initialized to the 'None' state.
"""
def __init__(self, filename, mode='r', silent=False):
"""Create a FiveC object."""
self.file = os.path.abspath(filename)
self.filetype = 'fivec_project'
self.silent = silent
self.binning_corrections = None
self.binning_correction_indices = None
self.binning_frag_indices = None
self.binning_num_bins = None
self.model_parameters = None
self.corrections = None
self.region_means = None
self.gamma = None
self.sigma = None
self.trans_mean = None
self.normalization = 'none'
self.history = ''
if mode != 'w':
self.load()
return None
def __getitem__(self, key):
"""Dictionary-like lookup."""
if key in self.__dict__:
return self.__dict__[key]
else:
return None
def __setitem__(self, key, value):
"""Dictionary-like value setting."""
self.__dict__[key] = value
return None
def load_data(self, filename):
"""
Load fragment-pair counts and fragment object from :class:`FiveCData <hifive.fivec_data.FiveCData>` object.
:param filename: Specifies the file name of the :class:`FiveCData <hifive.fivec_data.FiveCData>` object to associate with this analysis.
:type filename: str.
:returns: None
:Attributes: * **datafilename** (*str.*) - A string containing the relative path of the FiveCData file.
* **fragfilename** (*str.*) - A string containing the relative path of the Fragment file associated with the FiveCData file.
* **frags** (*filestream*) - A filestream to the hdf5 Fragment file such that all saved Fragment attributes can be accessed through this class attribute.
* **data** (*filestream*) - A filestream to the hdf5 FiveCData file such that all saved FiveCData attributes can be accessed through this class attribute.
* **chr2int** (*dict.*) - A dictionary that converts chromosome names to chromosome indices.
* **filter** (*ndarray*) - A numpy array of type int32 and size N where N is the number of fragments. This contains the inclusion status of each fragment with a one indicating included and zero indicating excluded and is initialized with all fragments included.
When a FiveCData object is associated with the project file, the 'history' attribute is updated with the history of the FiveCData object.
"""
self.history += "FiveC.load_data(filename='%s') - " % filename
# ensure data h5dict exists
if not os.path.exists(filename):
if not self.silent:
print >> sys.stderr, ("Could not find %s. No data loaded.\n") % (filename.split('/')[-1]),
self.history += "Error: '%s' not found\n" % filename
return None
self.datafilename = "%s/%s" % (os.path.relpath(os.path.dirname(os.path.abspath(filename)),
os.path.dirname(self.file)), os.path.basename(filename))
self.data = h5py.File(filename, 'r')
self.history = self.data['/'].attrs['history'] + self.history
fragfilename = self.data['/'].attrs['fragfilename']
if fragfilename[:2] == './':
fragfilename = fragfilename[2:]
parent_count = fragfilename.count('../')
fragfilename = '/'.join(os.path.abspath(filename).split('/')[:-(1 + parent_count)] +
fragfilename.lstrip('/').split('/')[parent_count:])
self.fragfilename = "%s/%s" % (os.path.relpath(os.path.dirname(fragfilename),
os.path.dirname(self.file)), os.path.basename(fragfilename))
# ensure fend h5dict exists
if not os.path.exists(fragfilename):
if not self.silent:
print >> sys.stderr, ("Could not find %s.\n") % (fragfilename),
self.history += "Error: '%s' not found\n" % fragfilename
return None
self.frags = h5py.File(fragfilename, 'r')
# create dictionary for converting chromosome names to indices
self.chr2int = {}
for i, chrom in enumerate(self.frags['chromosomes']):
self.chr2int[chrom] = i
# create arrays
self.filter = numpy.ones(self.frags['fragments'].shape[0], dtype=numpy.int32)
self.history += 'Success\n'
return None
def save(self, out_fname=None):
"""
Save analysis parameters to h5dict.
:param filename: Specifies the file name of the :class:`FiveC <hifive.fivec.FiveC>` object to save this analysis to.
:type filename: str.
:returns: None
"""
self.history.replace("'None'", "None")
if not out_fname is None:
original_file = os.path.abspath(self.file)
if 'datafilename' in self.__dict__:
datafilename = self.datafilename
if datafilename[:2] == './':
datafilename = datafilename[2:]
parent_count = datafilename.count('../')
datafilename = '/'.join(original_file.split('/')[:-(1 + parent_count)] +
datafilename.lstrip('/').split('/')[parent_count:])
self.datafilename = "%s/%s" % (os.path.relpath(os.path.dirname(os.path.abspath(datafilename)),
os.path.dirname(self.file)), os.path.basename(datafilename))
if 'fragfilename' in self.__dict__:
fragfilename = self.fragfilename
if fragfilename[:2] == './':
fragfilename = fragfilename[2:]
parent_count = fragfilename.count('../')
fragfilename = '/'.join(original_file.split('/')[:-(1 + parent_count)] +
fragfilename.lstrip('/').split('/')[parent_count:])
self.fragfilename = "%s/%s" % (os.path.relpath(os.path.dirname(os.path.abspath(fragfilename)),
os.path.dirname(self.file)), os.path.basename(fragfilename))
else:
out_fname = self.file
datafile = h5py.File(out_fname, 'w')
for key in self.__dict__.keys():
if key in ['data', 'frags', 'file', 'chr2int', 'silent']:
continue
elif self[key] is None:
continue
elif isinstance(self[key], numpy.ndarray):
datafile.create_dataset(key, data=self[key])
elif not isinstance(self[key], dict):
datafile.attrs[key] = self[key]
datafile.close()
return None
def load(self):
"""
Load analysis parameters from h5dict specified at object creation and open h5dicts for associated :class:`FiveCData <hifive.fivec_data.FiveCData>` and :class:`Fragment <hifive.fragment.Fragment>` objects.
Any call of this function will overwrite current object data with values from the last :func:`save` call.
:returns: None
"""
# return attributes to init state
self.binning_corrections = None
self.binning_correction_indices = None
self.binning_frag_indices = None
self.binning_num_bins = None
self.model_parameters = None
self.corrections = None
self.region_means = None
self.gamma = None
self.sigma = None
self.trans_mean = None
self.normalization = 'none'
self.history = ''
# load data hdf5 dict
datafile = h5py.File(self.file, 'r')
for key in datafile.keys():
self[key] = numpy.copy(datafile[key])
for key in datafile['/'].attrs.keys():
self[key] = datafile['/'].attrs[key]
# ensure data h5dict exists
if 'datafilename' in self.__dict__:
datafilename = self.datafilename
if datafilename[:2] == './':
datafilename = datafilename[2:]
parent_count = datafilename.count('../')
datafilename = '/'.join(self.file.split('/')[:-(1 + parent_count)] +
datafilename.lstrip('/').split('/')[parent_count:])
if not os.path.exists(datafilename):
if not self.silent:
print >> sys.stderr, ("Could not find %s. No data loaded.\n") % (datafilename),
else:
self.data = h5py.File(datafilename, 'r')
# ensure fragment h5dict exists
if 'fragfilename' in self.__dict__:
fragfilename = self.fragfilename
if fragfilename[:2] == './':
fragfilename = fragfilename[2:]
parent_count = fragfilename.count('../')
fragfilename = '/'.join(self.file.split('/')[:-(1 + parent_count)] +
fragfilename.lstrip('/').split('/')[parent_count:])
if not os.path.exists(fragfilename):
if not self.silent:
print >> sys.stderr, ("Could not find %s. No fragments loaded.\n") % (fragfilename),
else:
self.frags = h5py.File(fragfilename, 'r')
# create dictionary for converting chromosome names to indices
self.chr2int = {}
for i, chrom in enumerate(self.frags['chromosomes']):
self.chr2int[chrom] = i
datafile.close()
return None
def filter_fragments(self, mininteractions=20, mindistance=0, maxdistance=0):
"""
        Iterate over the dataset and remove fragments that do not have 'mininteractions' interactions, using only unfiltered fragments and interactions falling within the distance limits specified.
In order to create a set of fragments that all have the necessary number of interactions, after each round of filtering, fragment interactions are retallied using only interactions that have unfiltered fragments at both ends.
:param mininteractions: The required number of interactions for keeping a fragment in analysis.
:type mininteractions: int.
:param mindistance: The minimum inter-fragment distance to be included in filtering.
:type mindistance: int.
:param maxdistance: The maximum inter-fragment distance to be included in filtering. A value of zero indicates no maximum cutoff.
:type maxdistance: int.
:returns: None
"""
self.history += "FiveC.filter_fragments(mininteractions=%i, mindistance=%s, maxdistance=%s) - " % (mininteractions, str(mindistance), str(maxdistance))
if not self.silent:
print >> sys.stderr, ("Filtering fragments..."),
original_count = numpy.sum(self.filter)
previous_valid = original_count + 1
current_valid = original_count
coverage = numpy.zeros(self.filter.shape[0], dtype=numpy.int32)
# copy needed arrays
data = self.data['cis_data'][...]
distances = self.frags['fragments']['mid'][data[:, 1]] - self.frags['fragments']['mid'][data[:, 0]]
if maxdistance == 0 or maxdistance is None:
maxdistance = numpy.amax(distances) + 1
valid = numpy.where((self.filter[data[:, 0]] * self.filter[data[:, 1]]) *
(distances >= mindistance) * (distances < maxdistance))[0]
data = data[valid, :]
# repeat until all remaining fragments have minobservation valid observations
while current_valid < previous_valid:
previous_valid = current_valid
coverage = numpy.bincount(data[:, 0], minlength=self.filter.shape[0])
coverage += numpy.bincount(data[:, 1], minlength=self.filter.shape[0])
invalid = numpy.where(coverage < mininteractions)[0]
self.filter[invalid] = 0
valid = numpy.where(self.filter[data[:, 0]] * self.filter[data[:, 1]])[0]
data = data[valid, :]
current_valid = numpy.sum(self.filter)
if not self.silent:
print >> sys.stderr, ("Removed %i of %i fragments\n") % (original_count - current_valid, original_count),
self.history += "Success\n"
return None
def find_distance_parameters(self):
"""
Regress log counts versus inter-fragment distances to find slope and intercept values and then find the standard deviation of corrected counts.
:returns: None
:Attributes: * **gamma** (*float*) - A float denoting the negative slope of the distance-dependence regression line.
* **sigma** (*float*) - A float denoting the standard deviation of nonzero data about the distance-dependence regression line.
* **region_means** (*ndarray*) - A numpy array of type float32 and length equal to the number of regions. This is initialized to zeros until fragment correction values are found.
"""
self.history += "FiveC.find_distance_parameters() - "
if not self.silent:
print >> sys.stderr, ("Finding distance parameters..."),
# copy needed arrays
data = self.data['cis_data'][...]
mids = self.frags['fragments']['mid'][...]
# find which pairs are both unfiltered
valid = numpy.where(self.filter[data[:, 0]] * self.filter[data[:, 1]])[0]
# find distances between fragment pairs
log_distances = numpy.log(mids[data[valid, 1]] - mids[data[valid, 0]])
# find regression line
counts = numpy.log(data[valid, 2])
if not self.corrections is None:
counts -= self.corrections[data[valid, 0]] - self.corrections[data[valid, 1]]
temp = linregress(log_distances, counts)[:2]
self.gamma = -float(temp[0])
if self.region_means is None:
self.region_means = numpy.zeros(self.frags['regions'].shape[0], dtype=numpy.float32) + temp[1]
self.sigma = float(numpy.std(counts - temp[1] + self.gamma * log_distances))
if not self.silent:
print >> sys.stderr, ("Done\n"),
self.history += "Success\n"
return None
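    # Hedged aside (descriptive note, not original code): the fit above assumes a
    # power-law distance decay, log(count) ~ mu - gamma * log(distance), so that
    # expected counts scale as distance ** (-gamma); `sigma` is the residual
    # spread reused by the log-normal probability model below.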
def find_probability_fragment_corrections(self, mindistance=0, maxdistance=0, max_iterations=1000,
minchange=0.0005, learningstep=0.1, precalculate=True, regions=[],
precorrect=False):
"""
Using gradient descent, learn correction values for each valid fragment based on a Log-Normal distribution of observations.
:param mindistance: The minimum inter-fragment distance to be included in modeling.
:type mindistance: int.
:param maxdistance: The maximum inter-fragment distance to be included in modeling.
:type maxdistance: int.
:param max_iterations: The maximum number of iterations to carry on gradient descent for.
        :type max_iterations: int.
:param minchange: The cutoff threshold for early learning termination for the maximum absolute gradient value.
:type minchange: float
:param learningstep: The scaling factor for decreasing learning rate by if step doesn't meet armijo criterion.
:type learningstep: float
:param precalculate: Specifies whether the correction values should be initialized at the fragment means.
:type precalculate: bool.
:param regions: A list of regions to calculate corrections for. If set as None, all region corrections are found.
:type regions: list
:param precorrect: Use binning-based corrections in expected value calculations, resulting in a chained normalization approach.
:type precorrect: bool.
:returns: None
:Attributes: * **corrections** (*ndarray*) - A numpy array of type float32 and length equal to the number of fragments. All invalid fragments have an associated correction value of zero.
The 'normalization' attribute is updated to 'probability' or 'binning-probability', depending on if the 'precorrect' option is selected. In addition, the 'region_means' attribute is updated such that the mean correction (sum of all valid regional correction value pairs) is adjusted to zero and the corresponding region mean is adjusted the same amount but the opposite sign.
"""
self.history += "FiveC.find_probability_fragment_corrections(mindistance=%s, maxdistance=%s, max_iterations=%i, minchange=%f, learningstep=%f, precalculate=%s, regions=%s, precorrect=%s) - " % (str(mindistance), str(maxdistance), max_iterations, minchange, learningstep, precalculate, str(regions), precorrect)
if precorrect and self.binning_corrections is None:
if not self.silent:
print >> sys.stderr, ("Precorrection can only be used in project has previously run 'find_binning_fragment_corrections'.\n"),
self.history += "Error: 'find_binning_fragment_corrections()' not run yet\n"
return None
if self.corrections is None:
self.corrections = numpy.zeros(self.frags['fragments'].shape[0], dtype=numpy.float32)
# if regions not given, set to all regions
if regions == None or len(regions) == 0:
regions = numpy.arange(self.frags['regions'].shape[0])
# determine if distance parameters have been calculated
if self.gamma is None:
self.find_distance_parameters()
# limit corrections to only requested regions
filt = numpy.copy(self.filter)
for i in range(self.frags['regions'].shape[0]):
if i not in regions:
filt[self.frags['regions']['start_frag'][i]:self.frags['regions']['stop_frag'][i]] = 0
# copy and calculate needed arrays
if not self.silent:
print >> sys.stderr, ("\r%s\rCopying needed data...") % (' ' * 80),
data = self.data['cis_data'][...]
distances = self.frags['fragments']['mid'][data[:, 1]] - self.frags['fragments']['mid'][data[:, 0]]
if maxdistance == 0 or maxdistance is None:
maxdistance = numpy.amax(distances) + 1
valid = numpy.where((filt[data[:, 0]] * filt[data[:, 1]]) *
(distances >= mindistance) * (distances < maxdistance))[0]
data = data[valid, :]
distances = numpy.log(distances[valid])
counts_n = numpy.log(data[:, 2] - 0.5).astype(numpy.float32)
counts = numpy.log(data[:, 2]).astype(numpy.float32)
counts_p = numpy.log(data[:, 2] + 0.5).astype(numpy.float32)
distance_signal = (-self.gamma * distances).astype(numpy.float32)
distance_signal += self.region_means[self.frags['fragments']['region'][data[:, 0]]]
# create empty arrays
gradients = numpy.zeros(self.filter.shape[0], dtype=numpy.float32)
valid = numpy.where(filt)[0]
# find number of interactions for each fragment
interactions = numpy.bincount(data[:, 0], minlength=self.filter.shape[0]).astype(numpy.int32)
interactions += numpy.bincount(data[:, 1], minlength=self.filter.shape[0]).astype(numpy.int32)
interactions = numpy.maximum(1, interactions)
# if precalculation requested, find fragment means
if precalculate:
enrichments = counts - distance_signal
count_sums = numpy.bincount(data[:, 0], weights=enrichments, minlength=gradients.shape[0])
count_sums += numpy.bincount(data[:, 1], weights=enrichments, minlength=gradients.shape[0])
self.corrections = ((count_sums / numpy.maximum(1, interactions)) * 0.5).astype(numpy.float32)
if precorrect:
if not self.silent:
print >> sys.stderr, ("\r%s\rFinding binning corrections...") % (' ' * 80),
_optimize.find_binning_correction_adjustment(distance_signal,
data,
self.binning_corrections,
self.binning_correction_indices,
self.binning_num_bins,
self.binning_frag_indices)
# cycle through learning phases
if not self.silent:
print >> sys.stderr, ("\r%s\rLearning corrections...") % (' ' * 80),
iteration = 0
cont = True
change = numpy.inf
new_corrections = numpy.copy(self.corrections)
start_cost = _optimize.calculate_prob_cost(data,
counts_n,
counts,
counts_p,
distance_signal,
self.corrections,
self.sigma)
previous_cost = start_cost
while cont:
iteration += 1
# find gradients
gradients.fill(0.0)
_optimize.calculate_gradients(data,
counts_n,
counts,
counts_p,
distance_signal,
self.corrections,
gradients,
self.sigma)
# find best step size
armijo = numpy.inf
t = 0.1
gradients /= interactions
gradient_norm = numpy.sum(gradients[valid] ** 2.0)
j = 0
best_score = numpy.inf
best_t = 0.1
while armijo > 0.0:
# update gradients
_optimize.update_corrections(filt,
self.corrections,
new_corrections,
gradients,
t)
cost = _optimize.calculate_prob_cost(data,
counts_n,
counts,
counts_p,
distance_signal,
new_corrections,
self.sigma)
if numpy.isnan(cost):
cost = numpy.inf
armijo = numpy.inf
else:
armijo = cost - previous_cost + t * gradient_norm
if cost < best_score:
best_score = cost
best_t = t
if not self.silent:
print >> sys.stderr, ("\r%s iteration:%i cost:%f change:%f armijo: %f %s") %\
('Learning corrections...', iteration, previous_cost,
change, armijo, ' ' * 20),
t *= learningstep
j += 1
if j == 20:
armijo = -numpy.inf
t = best_t
_optimize.update_corrections(filt,
self.corrections,
new_corrections,
gradients,
t)
cost = _optimize.calculate_prob_cost(data,
counts_n,
counts,
counts_p,
distance_signal,
new_corrections,
self.sigma)
previous_cost = cost
self.corrections = new_corrections
change = numpy.amax(numpy.abs(gradients[valid] / new_corrections[valid]))
if not self.silent:
print >> sys.stderr, ("\r%s iteration:%i cost:%f change:%f %s") %\
('Learning corrections...', iteration, cost, change, ' ' * 40),
iteration += 1
if iteration >= max_iterations or change <= minchange:
cont = False
if not self.silent:
print >> sys.stderr, ("\r%s\rLearning corrections... Initial Cost: %f Final Cost: %f Done\n") %\
(' ' * 80, start_cost, cost),
# Calculate region means
if self.region_means is None:
self.region_means = numpy.zeros(self.frags['regions'].shape[0], dtype=numpy.float32)
for i in regions:
start = self.frags['regions']['start_frag'][i]
stop = self.frags['regions']['stop_frag'][i]
forward = (numpy.where(self.filter[start:stop] *
(self.frags['fragments']['strand'][start:stop] == 0))[0] + start)
reverse = (numpy.where(self.filter[start:stop] *
(self.frags['fragments']['strand'][start:stop] == 1))[0] + start)
if forward.shape[0] == 0 or reverse.shape[0] == 0:
continue
region_mean = (numpy.sum(self.corrections[forward]) * reverse.shape[0] +
numpy.sum(self.corrections[reverse]) * forward.shape[0])
region_mean /= forward.shape[0] * reverse.shape[0]
self.corrections[forward] -= region_mean / 2.0
self.corrections[reverse] -= region_mean / 2.0
self.region_means[i] += region_mean
if precorrect:
self.normalization = 'binning-probability'
else:
self.normalization = 'probability'
        self.history += 'Success\n'
return None
def find_express_fragment_corrections(self, mindistance=0, maxdistance=0, iterations=1000, remove_distance=False,
usereads='cis', regions=[], precorrect=False, logged=True, kr=False):
"""
Using iterative approximation, learn correction values for each valid fragment.
:param mindistance: The minimum inter-fragment distance to be included in modeling.
:type mindistance: int.
:param maxdistance: The maximum inter-fragment distance to be included in modeling.
:type maxdistance: int.
:param iterations: The number of iterations to use for learning fragment corrections.
:type iterations: int.
:param remove_distance: Specifies whether the estimated distance-dependent portion of the signal is removed prior to learning fragment corrections.
:type remove_distance: bool.
:param usereads: Specifies which set of interactions to use, 'cis', 'trans', or 'all'.
:type usereads: str.
:param regions: A list of regions to calculate corrections for. If set as None, all region corrections are found.
:type regions: list
:param precorrect: Use binning-based corrections in expected value calculations, resulting in a chained normalization approach.
:type precorrect: bool.
:param logged: Use log-counts instead of counts for learning.
:type logged: bool.
:param kr: Use the Knight Ruiz matrix balancing algorithm instead of weighted matrix balancing. This option ignores 'iterations' and 'logged'.
:type kr: bool.
:returns: None
Calling this function creates the following attributes:
:Attributes: * **corrections** (*ndarray*) - A numpy array of type float32 and length equal to the number of fragments. All invalid fragments have an associated correction value of zero.
The 'normalization' attribute is updated to 'express' or 'binning-express', depending on if the 'precorrect' option is selected. In addition, if the 'remove_distance' option is selected, the 'region_means' attribute is updated such that the mean correction (sum of all valid regional correction value pairs) is adjusted to zero and the corresponding region mean is adjusted the same amount but the opposite sign.
"""
self.history += "FiveC.find_express_fragment_corrections(mindistance=%s, maxdistance=%s, iterations=%i, remove_distance=%s, usereads='%s', regions=%s, precorrect=%s, logged=%s, kr=%s) - " % (str(mindistance), str(maxdistance), iterations, remove_distance, usereads, str(regions), precorrect, logged, kr)
if precorrect and self.binning_corrections is None:
if not self.silent:
print >> sys.stderr, ("Precorrection can only be used in project has previously run 'find_binning_fragment_corrections'.\n"),
self.history += "Error: 'find_binning_fragment_corrections()' not run yet\n"
return None
# make sure usereads has a valid value
if usereads not in ['cis', 'trans', 'all']:
if not self.silent:
print >> sys.stderr, ("'usereads' does not have a valid value.\n"),
self.history += "Error: '%s' not a valid value for 'usereads'\n" % usereads
return None
# if regions not given, set to all regions
        if regions is None or len(regions) == 0:
regions = numpy.arange(self.frags['regions'].shape[0])
if self.corrections is None:
self.corrections = numpy.zeros(self.frags['fragments'].shape[0], dtype=numpy.float32)
if kr:
self._find_kr_corrections(mindistance, maxdistance, remove_distance,
usereads, regions, precorrect, logged)
return None
# limit corrections to only requested regions
filt = numpy.copy(self.filter)
for i in range(self.frags['regions'].shape[0]):
if i not in regions:
filt[self.frags['regions']['start_frag'][i]:self.frags['regions']['stop_frag'][i]] = 0
if not self.silent:
print >> sys.stderr, ("\r%s\rCopying needed data...") % (' ' * 80),
# copy and calculate needed arrays
data = None
trans_data = None
counts = None
trans_counts = None
distance_signal = None
trans_signal = None
corrections = numpy.copy(self.corrections)
if usereads in ['cis', 'all']:
data = self.data['cis_data'][...]
distances = (self.frags['fragments']['mid'][data[:, 1]] -
self.frags['fragments']['mid'][data[:, 0]]).astype(numpy.float32)
if maxdistance == 0 or maxdistance is None:
maxdistance = numpy.amax(distances) + 1
valid = numpy.where((filt[data[:, 0]] * filt[data[:, 1]]) *
(distances >= mindistance) * (distances < maxdistance))[0]
data = data[valid, :]
counts = numpy.log(data[:, 2]).astype(numpy.float64)
distances = distances[valid]
if remove_distance:
if self.gamma is None:
self.find_distance_parameters()
                distance_signal = (-self.gamma * numpy.log(distances))
#!/usr/bin/env python
from __future__ import print_function
import argparse
import numpy as np
import os, sys, shutil, subprocess, glob
import re
from numpy import pi
from scipy import *
import json
from tabulate import tabulate
from itertools import chain
import flapwmbpt_ini
import prepare_realaxis
# from scipy.interpolate import interp1d
# trans_basis_mode: 0, use wannier function as basis set
# trans_basis_mode: 1, use transformation matrix to rotate the basis set. this matrix doesn't change as a function of iteration.
# trans_basis_mode: 2, use transformation matrix to rotate the basis set. this matrix does change as a function of iteration. this matrix diagonalize the spectral function at the chemical potential.
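# A hedged, minimal illustration of how these modes might be selected in comdmft.ini
# (key names come from the checks in read_comdmft_ini below; the values and the file
# name 'trans_basis.dat' are only examples, not recommendations):
#   control = {
#       'method': 'lqsgw+dmft',
#       'trans_basis_mode': 1,               # fixed rotation matrix
#       'trans_basis': 'trans_basis.dat',    # required for modes 1 and 2
#       # 'metal_threshold': 1e-3,           # additionally required for mode 2
#   }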
def open_h_log(control):
if (control['restart']):
control['h_log']=open('./cmd.log', 'a')
else:
control['h_log']=open('./cmd.log', 'w')
print('', file=control['h_log'],flush=True)
print('*********************************',file=control['h_log'],flush=True)
print(' ComDMFT', file=control['h_log'],flush=True)
print('*********************************',file=control['h_log'],flush=True)
print('', file=control['h_log'],flush=True)
#DEBUG
control['h_log'].flush()
os.fsync(control['h_log'].fileno())
#DEBUG
return None
def close_h_log(control):
control['h_log'].close()
return None
def read_comdmft_ini_control():
vglobl={}
vlocal={}
with open('comdmft.ini') as f_ini:
code = compile(f_ini.read(), "comdmft.ini", 'exec')
exec(code, vglobl, vlocal)
f_ini.close()
control=vlocal['control']
return control
def read_comdmft_ini_postprocessing():
vglobl={}
vlocal={}
with open('comdmft.ini') as f_ini:
code = compile(f_ini.read(), "comdmft.ini", 'exec')
exec(code, vglobl, vlocal)
f_ini.close()
control=vlocal['control']
postprocessing_dict=vlocal['postprocessing']
check_key_in_string('mpi_prefix', control)
check_key_in_string('comsuite_dir', postprocessing_dict)
if (control['method']=='spectral') | (control['method']=='band'):
with open(postprocessing_dict['comsuite_dir']+'/comdmft.ini') as f_ini:
code = compile(f_ini.read(), "comdmft.ini", 'exec')
exec(code, vglobl, vlocal)
f_ini.close()
control_temp=vlocal['control']
postprocessing_dict['kpoints']=postprocessing_dict.get('kpoints', os.path.abspath(postprocessing_dict['comsuite_dir']+'/'+control_temp['initial_lattice_dir'])+'/kpoints')
if ((control['method']=='dos') | (control['method']=='dos_qp')):
check_key_in_string('kmesh', postprocessing_dict)
if ((control['method']=='spectral') | (control['method']=='dos')):
check_key_in_string('self energy', postprocessing_dict)
postprocessing_dict['broadening']=postprocessing_dict.get('broadening', 0.01)
return control, postprocessing_dict
def read_comdmft_ini():
vglobl={}
vlocal={}
with open('comdmft.ini') as f_ini:
code = compile(f_ini.read(), "comdmft.ini", 'exec')
exec(code, vglobl, vlocal)
f_ini.close()
# print vglobl
# print 'here'
control=vlocal['control']
wan_hmat=vlocal['wan_hmat']
imp=vlocal['imp']
control['name']='control'
wan_hmat['name']='wan_hmat'
imp['name']='imp'
control['restart']=control.get('restart', False)
open_h_log(control)
control['comsuitedir']=os.environ.get('COMSUITE_BIN')
if not control['comsuitedir']:
print("Error: Environment variable COMSUITE_BIN is not defined.", file=control['h_log'],flush=True)
sys.exit()
print('comsuitedir', control['comsuitedir'])
control['conv_table']=[]
### in control
control['cal_mu']=control.get('cal_mu', True)
control['top_dir']=os.path.abspath('./')
check_key_in_string('method', control)
control['sigma_mix_ratio']=control.get('sigma_mix_ratio', 0.5)
control['doping']=control.get('doping', 0.0)
control['dc_mode']=control.get('dc_mode', 'dc_at_gw')
control['u_mode']=control.get('u_mode', 'bnse')
control['trans_basis_mode']=control.get('trans_basis_mode', 0)
if (control['trans_basis_mode']==1):
check_key_in_string('trans_basis', control)
elif (control['trans_basis_mode']==2):
check_key_in_string('metal_threshold', control)
check_key_in_string('spin_orbit', control)
check_key_in_string('impurity_problem', control)
check_key_in_string('impurity_problem_equivalence', control)
check_key_in_string('initial_lattice_dir', control)
control['initial_lattice_dir']=os.path.abspath(control['initial_lattice_dir'])
control['allfile']=find_allfile(control['initial_lattice_dir'])
if ('dc_directory' not in control):
control['dc_directory']='./dc'
control['dc_directory']=os.path.abspath(control['dc_directory'])
if ('impurity_directory' not in control):
control['impurity_directory']='./impurity'
control['impurity_directory']=os.path.abspath(control['impurity_directory'])
if ('lowh_directory' not in control):
control['lowh_directory']='./lowh'
control['lowh_directory']=os.path.abspath(control['lowh_directory'])
if ('wannier_directory' not in control):
control['wannier_directory']='./wannier'
control['wannier_directory']=os.path.abspath(control['wannier_directory'])
if ('initial_self_energy' in control):
control['initial_self_energy'] =os.path.abspath(control['initial_self_energy'])
if (control['trans_basis_mode']!=0):
check_key_in_string('trans_basis', control)
if ('dc_mat_to_read' in control):
control['dc_mat_to_read'] =os.path.abspath(control['dc_mat_to_read'])
if (control['method']=='lda+dmft'):
control['convergence_header']=['step','i_outer','i_latt','i_imp','causality','delta_rho','w_sp_min','w_sp_max', 'mu', 'std_sig', 'n_imp', 'histo_1', 'histo_2', 'ctqmc_sign']
if (control['method']=='lqsgw+dmft'):
control['convergence_header']=['step','i_imp','causality','static_f0','w_sp_min','w_sp_max', 'mu', 'std_sig', 'n_imp', 'histo_1', 'histo_2', 'ctqmc_sign']
# mpi_prefix
if ('mpi_prefix' in control):
control['mpi_prefix_flapwmbpt']=control.get('mpi_prefix_flapwmbpt', control['mpi_prefix'])
control['mpi_prefix_lowh']=control.get('mpi_prefix_lowh', control['mpi_prefix'])
control['mpi_prefix_impurity']=control.get('mpi_prefix_impurity', control['mpi_prefix'])
control['mpi_prefix_wannier']=control.get('mpi_prefix_wannier', control['mpi_prefix'])
if (control['method']=='lda+dmft'):
control['mpi_prefix_lattice']=control.get('mpi_prefix_lattice', control['mpi_prefix'])
if (control['method']=='lqsgw+dmft'):
control['mpi_prefix_dc']=control.get('mpi_prefix_dc', control['mpi_prefix'])
# mpi_prefix_coulomb
if ('mpi_prefix_coulomb' in control):
check_key_in_string('nproc_k_coulomb', control)
check_key_in_string('nproc_tau_coulomb', control)
else:
# temp=[int(x) for x in np.loadtxt(control['initial_lattice_dir']+'/k_tau_freq.dat')]
temp=list(map(int,np.loadtxt(control['initial_lattice_dir']+'/k_tau_freq.dat')))
control['mpi_prefix_coulomb'], control['nproc_k_coulomb'],control['nproc_tau_coulomb']=optimized_nproc_for_comcoulomb(control['mpi_prefix'], temp[0], temp[1],temp[2],temp[3])
# print('mpi_prefix_coulomb', control['mpi_prefix_coulomb'], file=control['h_log'],flush=True)
# max iteration
if (control['method']=='lda+dmft'):
control['max_iter_num_impurity']=control.get('max_iter_num_impurity', 1)
control['max_iter_num_outer']=control.get('max_iter_num_outer', 50)
elif (control['method']=='lqsgw+dmft'):
control['max_iter_num_impurity']=control.get('max_iter_num_impurity', 50)
# directory_name
if (control['method']=='lda+dmft'):
if ('lattice_directory' not in control):
control['lattice_directory']='./lattice'
control['lattice_directory']=os.path.abspath(control['lattice_directory'])
if (control['method']=='lqsgw+dmft'):
if ('coulomb_directory' not in control):
control['coulomb_directory']='./coulomb'
control['coulomb_directory']=os.path.abspath(control['coulomb_directory'])
if (control['method']=='lqsgw+dmft'):
control['do_wannier']=True
control['do_coulomb']=True
control['do_dc']=True
control['iter_num_impurity']=1
control['iter_num_outer']=1
elif (control['method']=='lda+dmft'):
control['iter_num_outer']=1
control['iter_num_impurity']=0
if (control['restart']):
find_place_to_restart(control)
if (control['method']=='lqsgw+dmft'):
print('do_wannier', control['do_wannier'], file=control['h_log'],flush=True)
print('do_coulomb', control['do_coulomb'], file=control['h_log'],flush=True)
print('do_dc', control['do_dc'], file=control['h_log'],flush=True)
# in wan_hmat
check_key_in_string('kgrid', wan_hmat)
check_key_in_string('froz_win_min', wan_hmat)
check_key_in_string('froz_win_max', wan_hmat)
wan_hmat['write_wan']=wan_hmat.get('write_wan', False)
wan_hmat['dis_win_min']=wan_hmat.get('dis_win_min', wan_hmat['froz_win_min'])
wan_hmat['dis_win_max']=wan_hmat.get('dis_win_max', wan_hmat['froz_win_max']+40.0)
control['proj_win_min']=control.get('proj_win_min', wan_hmat['dis_win_min'])
control['proj_win_max']=control.get('proj_win_max', wan_hmat['dis_win_max'])
wan_hmat['num_iter']=wan_hmat.get('num_iter', 0)
wan_hmat['dis_num_iter']=wan_hmat.get('dis_num_iter', 100)
wan_hmat['cut_low']=wan_hmat.get('cut_low', 0.4)
wan_hmat['cut_froz']=wan_hmat.get('cut_froz', 0.10)
wan_hmat['cut_total']=wan_hmat.get('cut_total', 0.0)
if (control['method']=='lqsgw+dmft'):
wan_hmat['rmode']=wan_hmat.get('rmode', 0)
wan_hmat['radfac']=wan_hmat.get('radfac', 1.0)
if (control['method']=='lda+dmft'):
wan_hmat['rmode']=wan_hmat.get('rmode', 0)
wan_hmat['radfac']=wan_hmat.get('radfac', 1.0)
# in imp
check_key_in_string('temperature', imp)
imp['beta']=1.0/(8.6173303*10**-5*imp['temperature'])
if ('initial_self_energy' in control):
control['n_omega']=np.shape(np.loadtxt(control['initial_self_energy']))[0]
else:
control['n_omega']=int(300.0/(2*pi/imp['beta']))
control['omega']=(np.arange(control['n_omega'])*2+1)*pi/imp['beta']
for key, value in imp.items():
if (not (isinstance(imp[key], dict))):
continue
imp[key]['name']=key
# imp[key]['para']=True
# for ktemp in control['impurity_problem_equivalence'] :
# if (ktemp == -1):
# imp[key]['para']=False
if (-1*int(key) in control['impurity_problem_equivalence']):
imp[key]['para']=False
else:
imp[key]['para']=True
imp[key]['problem']=control['impurity_problem'][control['impurity_problem_equivalence'].index(int(key))][1]
if (control['method']=='lda+dmft'):
check_key_in_string('f0', imp[key])
if ((imp[key]['problem']=='p') | (imp[key]['problem']=='d') | (imp[key]['problem']=='f')):
check_key_in_string('f2', imp[key])
if ((imp[key]['problem']=='d') | (imp[key]['problem']=='f')):
check_key_in_string('f4', imp[key])
if (imp[key]['problem']=='f'):
check_key_in_string('f6', imp[key])
# elif (control['method']=='lqsgw+dmft'):
# check_key_in_string('boson_low_truncation', imp[key])
check_key_in_string('thermalization_time', imp[key])
check_key_in_string('measurement_time', imp[key])
check_key_in_string('impurity_matrix', imp[key])
if (control['trans_basis_mode']<2):
imp[key]['impurity_matrix']=np.array(imp[key]['impurity_matrix'])
else:
print("impurity_matrix reset", file=control['h_log'],flush=True)
nimp_orb=len(imp[key]['impurity_matrix'])
imp[key]['impurity_matrix']=np.zeros((nimp_orb,nimp_orb), dtype='int')
for ii in range(nimp_orb):
imp[key]['impurity_matrix'][ii,ii]=ii+1
print('here', file=control['h_log'],flush=True)
print(type(imp[key]['impurity_matrix']), file=control['h_log'],flush=True)
print(imp[key]['impurity_matrix'], file=control['h_log'],flush=True)
print('here', file=control['h_log'],flush=True)
if (control['method']=='lda+dmft'):
check_key_in_string('nominal_n', imp[key])
check_key_in_string('green_cutoff', imp[key])
imp[key]['susceptibility_cutoff']=imp[key].get('susceptibility_cutoff', 50)
imp[key]['susceptibility_tail']=imp[key].get('susceptibility_tail', 300)
if ('coulomb' not in imp[key]):
imp[key]["coulomb"]='full'
control['sig_header']=['# omega(eV)']
for ii in sorted(set(control['impurity_problem_equivalence'])):
for jj in sorted(set(imp[str(abs(ii))]['impurity_matrix'].flatten().tolist())-{0}):
control['sig_header'].append("Re Sig_{"+str(ii)+','+str(jj)+'}(eV)')
control['sig_header'].append("Im Sig_{"+str(ii)+','+str(jj)+'}(eV)')
# check hdf5
if (os.path.isdir(control['initial_lattice_dir']+"/checkpoint/")):
control['hdf5']=False
else:
control['hdf5']=True
print('hdf5', control['hdf5'],file=control['h_log'],flush=True)
# print
print('top_dir', control['top_dir'], file=control['h_log'],flush=True)
if (control['method']=='lda+dmft'):
print('lattice_directory', control['lattice_directory'], file=control['h_log'],flush=True)
elif (control['method']=='lqsgw+dmft'):
print('coulomb_directory', control['coulomb_directory'], file=control['h_log'],flush=True)
print('wannier_directory', control['wannier_directory'], file=control['h_log'],flush=True)
print('dc_directory', control['dc_directory'], file=control['h_log'],flush=True)
print('impurity_directory', control['impurity_directory'], file=control['h_log'],flush=True)
print('lowh_directory', control['lowh_directory'], file=control['h_log'],flush=True)
return control,wan_hmat,imp
def find_impurity_wan(control, wan_hmat):
num_wann=np.shape(wan_hmat['basis'])[0]
control['impurity_wan']=[]
for ip in range(np.shape(control['impurity_problem'])[0]):
if (control['spin_orbit']):
if (control['impurity_problem'][ip][1].lower()=='f'):
control['impurity_wan'].append([0]*14)
for iwan in range(num_wann):
if ((wan_hmat['basis'][iwan]['atom']==control['impurity_problem'][ip][0]) and (wan_hmat['basis'][iwan]['l']==3)):
if (int(wan_hmat['basis'][iwan]['i']*2)==-1):
if (int(wan_hmat['basis'][iwan]['m']*2)==-5):
control['impurity_wan'][ip][0]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==-3):
control['impurity_wan'][ip][1]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==-1):
control['impurity_wan'][ip][2]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==1):
control['impurity_wan'][ip][3]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==3):
control['impurity_wan'][ip][4]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==5):
control['impurity_wan'][ip][5]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['i']*2)==1):
if (int(wan_hmat['basis'][iwan]['m']*2)==-7):
control['impurity_wan'][ip][6]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==-5):
control['impurity_wan'][ip][7]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==-3):
control['impurity_wan'][ip][8]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==-1):
control['impurity_wan'][ip][9]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==1):
control['impurity_wan'][ip][10]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==3):
control['impurity_wan'][ip][11]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==5):
control['impurity_wan'][ip][12]=wan_hmat['basis'][iwan]['ind']
elif (int(wan_hmat['basis'][iwan]['m']*2)==7):
control['impurity_wan'][ip][13]=wan_hmat['basis'][iwan]['ind']
if (control['impurity_wan'][ip].count(0) !=0):
print('something wrong in find_impurity_wan', file=control['h_log'],flush=True)
sys.exit()
else:
if (control['impurity_problem'][ip][1].lower()=='s'):
control['impurity_wan'].append([0]*1)
for iwan in range(num_wann):
if ((wan_hmat['basis'][iwan]['atom']==control['impurity_problem'][ip][0]) and (wan_hmat['basis'][iwan]['l']==0)):
if (wan_hmat['basis'][iwan]['m']==-0):
control['impurity_wan'][ip][0]=wan_hmat['basis'][iwan]['ind']
if (control['impurity_wan'][ip].count(0) !=0):
print('something wrong in find_impurity_wan', file=control['h_log'],flush=True)
sys.exit()
elif (control['impurity_problem'][ip][1].lower()=='p'):
control['impurity_wan'].append([0]*3)
for iwan in range(num_wann):
if ((wan_hmat['basis'][iwan]['atom']==control['impurity_problem'][ip][0]) and (wan_hmat['basis'][iwan]['l']==1)):
if (wan_hmat['basis'][iwan]['m']==-1):
control['impurity_wan'][ip][0]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-0):
control['impurity_wan'][ip][1]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==1):
control['impurity_wan'][ip][2]=wan_hmat['basis'][iwan]['ind']
if (control['impurity_wan'][ip].count(0) !=0):
print('something wrong in find_impurity_wan', file=control['h_log'],flush=True)
sys.exit()
elif (control['impurity_problem'][ip][1].lower()=='d'):
control['impurity_wan'].append([0]*5)
for iwan in range(num_wann):
if ((wan_hmat['basis'][iwan]['atom']==control['impurity_problem'][ip][0]) and (wan_hmat['basis'][iwan]['l']==2)):
if (wan_hmat['basis'][iwan]['m']==-2):
control['impurity_wan'][ip][0]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-1):
control['impurity_wan'][ip][1]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-0):
control['impurity_wan'][ip][2]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==1):
control['impurity_wan'][ip][3]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==2):
control['impurity_wan'][ip][4]=wan_hmat['basis'][iwan]['ind']
if (control['impurity_wan'][ip].count(0) !=0):
print('something wrong in find_impurity_wan', file=control['h_log'],flush=True)
sys.exit()
elif (control['impurity_problem'][ip][1].lower()=='f'):
control['impurity_wan'].append([0]*7)
for iwan in range(num_wann):
if ((wan_hmat['basis'][iwan]['atom']==control['impurity_problem'][ip][0]) and (wan_hmat['basis'][iwan]['l']==3)):
if (wan_hmat['basis'][iwan]['m']==-3):
control['impurity_wan'][ip][0]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-2):
control['impurity_wan'][ip][1]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-1):
control['impurity_wan'][ip][2]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==-0):
control['impurity_wan'][ip][3]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==1):
control['impurity_wan'][ip][4]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==2):
control['impurity_wan'][ip][5]=wan_hmat['basis'][iwan]['ind']
elif (wan_hmat['basis'][iwan]['m']==3):
control['impurity_wan'][ip][6]=wan_hmat['basis'][iwan]['ind']
if (control['impurity_wan'][ip].count(0) !=0):
print('something wrong in find_impurity_wan', file=control['h_log'],flush=True)
sys.exit()
return None
def initial_file_directory_setup(control):
directory_setup(control)
if (control['method'] == 'lda+dmft'):
print('iter_num_impurity', control['iter_num_impurity'], ' max_iter_num_impurity', control['max_iter_num_impurity'], file=control['h_log'],flush=True)
print('iter_num_outer', control['iter_num_outer'], ' max_iter_num_outer', control['max_iter_num_outer'], file=control['h_log'],flush=True)
elif (control['method'] == 'lqsgw+dmft'):
print('iter_num_impurity', control['iter_num_impurity'], file=control['h_log'],flush=True)
print('max_iter_num_impurity', control['max_iter_num_impurity'], file=control['h_log'],flush=True)
return None
def find_place_to_restart(control):
if (control['method']=='lqsgw+dmft'):
control['conv_table']=read_convergence_table(control)
# print(control['conv_table'], file=control['h_log'],flush=True)
if (len(control['conv_table'])>0):
n_imp_problem=np.amax(control['impurity_problem_equivalence'])
last_step=control['conv_table'][-1][0].strip().split('_')[0]
last_imp_iter=control['conv_table'][-1][1].strip()
if (len(control['conv_table'][-1][0].strip().split('_')) > 1):
last_imp=control['conv_table'][-1][0].strip().split('_')[1]
print(last_step, last_imp, last_imp_iter, file=control['h_log'],flush=True)
else:
print(last_step, last_imp_iter, file=control['h_log'],flush=True)
if last_step == 'wannier':
control['do_wannier']=False
control['do_coulomb']=True
control['do_dc']=True
control['iter_num_impurity']=1
elif last_step == 'coulomb':
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=True
control['iter_num_impurity']=1
elif last_step == 'dc':
if (int(last_imp) == n_imp_problem):
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=False
control['iter_num_impurity']=1
else:
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=True
control['iter_num_impurity']=1
for ii in range(int(last_imp)):
control['conv_table'].pop(-1)
elif (last_step == 'delta'):
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=False
control['iter_num_impurity']=int(last_imp_iter)
control['conv_table'].pop(-1)
elif (last_step == 'impurity'):
if (int(last_imp) == n_imp_problem):
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=False
control['iter_num_impurity']=int(last_imp_iter)+1
else:
control['do_wannier']=False
control['do_coulomb']=False
control['do_dc']=True
control['iter_num_impurity']=int(last_imp_iter)
for ii in range(int(last_imp)):
control['conv_table'].pop(-1)
else:
control['do_wannier']=True
control['do_coulomb']=True
control['do_dc']=True
control['iter_num_impurity']=1
else:
control['do_wannier']=True
control['do_coulomb']=True
control['do_dc']=True
control['iter_num_impurity']=1
elif (control['method']=='lda+dmft'):
control['conv_table']=read_convergence_table(control)
if (len(control['conv_table'])>0):
linecnt=0
for ii in range(np.shape(control['conv_table'])[0]):
if control['conv_table'][ii][0].strip()=='dft':
linecnt=ii
control['iter_num_outer']=int(control['conv_table'][ii][1])
for ii in range(linecnt, np.shape(control['conv_table'])[0]):
control['conv_table'].pop(-1)
return None
# def find_iter_num_for_restart(control):
# if (control['restart']):
# line_count=sum(1 for line in open(control['top_dir']+'/convergence.log'))
# if (line_count <=1):
# if (control['method']=='lda+dmft'):
# iter_num_outer=1
# elif (control['method']=='lqsgw+dmft'):
# iter_num_impurity=1
# else:
# if (control['method']=='lda+dmft'):
# iter_num_outer=1
# ff=open(control['top_dir']+'/convergence.log', 'r')
# firstline=ff.readline()
# for line in ff:
# temp=line.split()
# if (temp[0] == 'dft'):
# iter_num_outer=int(temp[1])
# ff.close()
# elif (control['method']=='lqsgw+dmft'):
# iter_num_impurity=1
# ff=open(control['top_dir']+'/convergence.log', 'r')
# firstline=ff.readline()
# for line in ff:
# temp=line.split()
# temp1=temp[0]
# if (temp1 == 'impurity'):
# iter_num_impurity=int(temp[2])
# ff.close()
# else:
# if (control['method']=='lda+dmft'):
# iter_num_outer=1
# elif (control['method']=='lqsgw+dmft'):
# iter_num_impurity=1
# if (control['method']=='lda+dmft'):
# return iter_num_outer
# elif (control['method']=='lqsgw+dmft'):
# return iter_num_impurity
def initial_lattice_directory_setup(control):
os.chdir(control['lattice_directory'])
if control['hdf5']:
files = glob.iglob(control['initial_lattice_dir']+"/*.rst")
for filename in files:
shutil.copy(filename, './')
else:
files = glob.iglob(control['initial_lattice_dir']+"/checkpoint/*.rst")
for filename in files:
shutil.copy(filename, './checkpoint/')
files = glob.iglob(control['initial_lattice_dir']+"/*el_density")
for filename in files:
shutil.copy(filename, './')
if os.path.exists(control['initial_lattice_dir']+'/kpath'):
shutil.copy(control['initial_lattice_dir']+'/kpath', './')
if os.path.exists(control['initial_lattice_dir']+'/ini'):
shutil.copy(control['initial_lattice_dir']+'/ini', './')
if os.path.exists(control['initial_lattice_dir']+'/symmetry_operations'):
shutil.copy(control['initial_lattice_dir']+'/symmetry_operations', './')
if os.path.exists(control['initial_lattice_dir']+'/kpoints'):
        shutil.copy(control['initial_lattice_dir']+'/kpoints', './')
files = glob.iglob(control['initial_lattice_dir']+"/*.cif")
for filename in files:
shutil.copy(filename, './')
iter_string='_'+str(control['iter_num_outer'])
shutil.copy(control['initial_lattice_dir']+'/'+control['allfile']+'.out', control['allfile']+iter_string+'.out')
print("initial dft directory setup done", file=control['h_log'],flush=True)
os.chdir(control['top_dir'])
return None
def create_comwann_ini(control, wan_hmat):
f=open('comwann.ini','w')
if (control['method']=='lda+dmft'):
f.write(control['lattice_directory']+'\n')
f.write('dft\n')
elif (control['method']=='lqsgw+dmft'):
f.write(control['initial_lattice_dir']+'\n')
f.write('qp\n')
elif (control['method']=='dft'):
f.write('../\n')
f.write('dft\n')
elif (control['method']=='lqsgw'):
f.write('../\n')
f.write('qp\n')
f.write(str(wan_hmat['dis_win_max'])+'\n')
f.write(str(wan_hmat['dis_win_min'])+'\n')
f.write(str(wan_hmat['froz_win_max'])+'\n')
f.write(str(wan_hmat['froz_win_min'])+'\n')
f.write(str(wan_hmat['num_iter'])+'\n')
f.write(str(wan_hmat['dis_num_iter'])+'\n')
if (wan_hmat['write_wan']):
f.write('1\n')
else:
f.write('0\n')
f.write(str(wan_hmat['cut_low'])+'\n')
f.write(str(wan_hmat['cut_froz'])+'\n')
f.write(str(wan_hmat['cut_total'])+'\n')
f.write(str(wan_hmat['rmode'])+'\n')
f.write(str(wan_hmat['radfac'])+'\n')
f.close()
def create_comcoulomb_ini(control):
f=open('comcoulomb.ini','w')
f.write(control['initial_lattice_dir']+'\n')
f.write(control['wannier_directory']+'\n')
f.write(str(control['nproc_tau_coulomb'])+'\n')
f.write(str(control['nproc_k_coulomb'])+'\n')
f.write(str(control['proj_win_min'])+'\n')
f.write(str(control['proj_win_max'])+'\n')
f.write('F\n')
f.write(control['u_mode']+'\n')
nimp_orb=0
natom=len(control['impurity_wan'])
for ii in range(natom):
nimp_orb=nimp_orb+len(control['impurity_wan'][ii])
f.write(str(nimp_orb)+'\n')
for iatom in range(natom):
f.write(' '.join(map(str,control['impurity_wan'][iatom]))+' ')
f.write('\n')
f.write('1\n')
f.write('F\n')
f.write('3.0\n')
f.write('F\n')
f.close()
# def create_wannier_inip(wan_hmat):
# # in the wannier directory
# g=open('wannier.inip', 'w')
# num_wann=np.shape(wan_hmat['basis'])[0]
# g.write(str(num_wann)+'\n')
# for ii in range(num_wann):
# if (control['spin_orbit']==False):
# tempstr=[wan_hmat['basis'][ii]['atom'], wan_hmat['basis'][ii]['l'], wan_hmat['basis'][ii]['m'], wan_hmat['basis'][ii]['xaxis'][0], wan_hmat['basis'][ii]['xaxis'][1], wan_hmat['basis'][ii]['xaxis'][2], wan_hmat['basis'][ii]['zaxis'][0], wan_hmat['basis'][ii]['zaxis'][1], wan_hmat['basis'][ii]['zaxis'][2]]
# else:
# tempstr=[wan_hmat['basis'][ii]['atom'], wan_hmat['basis'][ii]['l'], wan_hmat['basis'][ii]['i'], wan_hmat['basis'][ii]['m'], wan_hmat['basis'][ii]['xaxis'][0], wan_hmat['basis'][ii]['xaxis'][1], wan_hmat['basis'][ii]['xaxis'][2], wan_hmat['basis'][ii]['zaxis'][0], wan_hmat['basis'][ii]['zaxis'][1], wan_hmat['basis'][ii]['zaxis'][2]]
# g.write(' '.join(map(str, tempstr))+'\n')
# g.close()
# return None
def read_wan_hmat_basis(control):
# in the wannier directory
inip=np.loadtxt(control['wannier_directory']+'/wannier.inip')
basis_info=[]
if (control['spin_orbit']):
for ii in range(np.shape(inip)[0]):
basis_info.append({'atom':int(inip[ii,0]), 'l':int(inip[ii,1]), 'i':inip[ii,2],'m':inip[ii,3],'xaxis':inip[ii,4:7],'zaxis':inip[ii,7:10], 'ind':ii+1})
else:
for ii in range(np.shape(inip)[0]):
basis_info.append({'atom':int(inip[ii,0]), 'l':int(inip[ii,1]), 'm':int(inip[ii,2]),'xaxis':inip[ii,3:6],'zaxis':inip[ii,6:9], 'ind':ii+1})
print(basis_info, file=control['h_log'],flush=True)
print('reading wannier.inip to get basis information', file=control['h_log'],flush=True)
return basis_info
def check_key_in_string(key,dictionary):
if (key not in dictionary):
print('missing \''+key+'\' in '+dictionary['name'],flush=True)
sys.exit()
return None
def overwrite_key_in_string(key,dictionary,dictionaryname,value,h_log):
if (key in dictionary):
        print('\''+key+'\' in '+dictionaryname+' is overwritten', file=h_log,flush=True)
return value
# def dft_rst_file_check():
# check_for_files('*acc_core_dft.rst', h_log)
# check_for_files('*chemical_potential_dft.rst', h_log)
# check_for_files('*cor_norm_dft.rst', h_log)
# check_for_files('*dfi_dft.rst', h_log)
# check_for_files('*dfidot2_dft.rst', h_log)
# check_for_files('*dfidot_dft.rst', h_log)
# check_for_files('*e_bnd_dft.rst', h_log)
# check_for_files('*e_core_dft.rst', h_log)
# check_for_files('*el_density_dft.rst', h_log)
# check_for_files('*eny_dft.rst', h_log)
# check_for_files('*etot_dft.rst', h_log)
# check_for_files('*ev_bnd_*_dft.rst', h_log)
# check_for_files('*ffsmt_dft.rst', h_log)
# check_for_files('*fi_dft.rst', h_log)
# check_for_files('*fidot2_dft.rst', h_log)
# check_for_files('*fidot_dft.rst', h_log)
# check_for_files('*g_full_00_*_dft.rst', h_log)
# check_for_files('*g_loc_0_dft.rst', h_log)
# check_for_files('*gfun_dft.rst', h_log)
# check_for_files('*gfun_old_dft.rst', h_log)
# check_for_files('*gfund_dft.rst', h_log)
# check_for_files('*gfund_old_dft.rst', h_log)
# check_for_files('*n_bnd_dft.rst', h_log)
# check_for_files('*p_f_dft.rst', h_log)
# check_for_files('*pcor_dft.rst', h_log)
# check_for_files('*pcor_old_dft.rst', h_log)
# check_for_files('*pd2_f_dft.rst', h_log)
# check_for_files('*pd_f_dft.rst', h_log)
# check_for_files('*ptnl_dft.rst', h_log)
# check_for_files('*q_f_dft.rst', h_log)
# check_for_files('*qcor_dft.rst', h_log)
# check_for_files('*qcor_old_dft.rst', h_log)
# check_for_files('*qd2_f_dft.rst', h_log)
# check_for_files('*qd_f_dft.rst', h_log)
# check_for_files('*restart_ubi.rst', h_log)
# check_for_files('*ro_core_dft.rst', h_log)
# check_for_files('*v_intr_h_dft.rst', h_log)
# check_for_files('*v_intr_xc_dft.rst', h_log)
# check_for_files('*v_mt_h_dft.rst', h_log)
# check_for_files('*v_mt_xc_dft.rst', h_log)
# check_for_files('*z_bnd_*_dft.rst', h_log)
# return None
# def string_addwhitespace(string, stringsize):
# stringout=string
# if stringsize > len(string):
# stringout=string+' '*(stringsize-len(string))
# return stringout
def find_all_in_string(string, ch):
    for i, ltr in enumerate(string):
if ltr == ch:
yield i
def read_convergence_table(control):
if os.path.exists(control['top_dir']+'/convergence.log'):
with open(control['top_dir']+'/convergence.log', 'r') as logfile:
tmp=logfile.readlines()
nstep=len(tmp)-2
if (nstep>0):
endind=list(find_all_in_string(tmp[1],' '))[::2]+[len(tmp[1])-1]
startind=[0]+(np.array(list(find_all_in_string(tmp[1],' '))[1::2])+1).tolist()
ncolumn=len(endind)
f=open('./convergence.log', 'r')
f.readline()
f.readline()
convergence_table=[]
for lines in f:
eachline=[]
for ii in range(ncolumn):
eachline.append(lines.rstrip()[startind[ii]:endind[ii]])
if (len(eachline[0])>0):
convergence_table.append(eachline)
f.close()
else:
convergence_table=[]
else:
convergence_table=[]
return convergence_table
def generate_initial_self_energy(control,imp):
os.chdir(control['impurity_directory'])
if ('initial_self_energy' in control):
shutil.copy(control['initial_self_energy'], './sig.dat')
if ('initial_impurity_dir' in control):
initial_impurity_dirname=os.path.abspath(os.path.dirname(control['initial_impurity_dir']))
directories = glob.glob(initial_impurity_dirname+"/*/")
for directory_name in directories:
dest_dir=directory_name.split('/')[-2]
files = glob.iglob(os.path.abspath(directory_name)+"/config*")
for filename in files:
shutil.copy(filename, control['impurity_directory']+'/'+dest_dir)
else:
dc=np.loadtxt(control['dc_directory']+'/dc.dat')
beta=imp['beta']
n_omega=control['n_omega']
omega=control['omega']
cnt=0
dclist=[]
for ii in sorted(set(control['impurity_problem_equivalence'])):
for jj in sorted(set(imp[str(abs(ii))]['impurity_matrix'].flatten().tolist())-{0}):
if (imp[str(abs(ii))]['para']):
dclist=dclist+list(dc[(2*cnt):(2*cnt+2)])
else:
dclist=dclist+list(dc[(2*cnt):(2*cnt+2)]-np.array([0.001*np.sign(ii), 0.0]))
cnt=cnt+1
sig_table=[]
for jj in range(control['n_omega']):
sig_omega=[control['omega'][jj]]+dclist
sig_table.append(sig_omega)
with open('./sig.dat', 'w') as outputfile:
outputfile.write(tabulate(sig_table, headers=control['sig_header'], floatfmt=".12f", numalign="right", tablefmt="plain"))
if (control['method']=='lqsgw+dmft'):
iter_string='_0'
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_0'
labeling_file('./sig.dat', iter_string)
print('initial_self_energy generation done', file=control['h_log'],flush=True)
os.chdir(control['top_dir'])
return None
def prepare_initial_ef(control):
os.chdir(control['lowh_directory'])
f=open('ef.dat','w')
f.write('0.0\n')
f.close()
os.chdir(control['top_dir'])
return None
def delta_postprocessing(control,imp):
write_transformation_matrix(control,control['lowh_directory']+'/local_spectral_matrix_ef.dat')
cal_projected_mean_field_diagonal(control,imp)
cal_dc_diagonal(control)
cal_zinv_m1_diagonal(control)
cal_e_imp_diagonal(control)
delta_causality=cal_hyb_diagonal(control,imp)
if (delta_causality ==0):
print('delta causality broken', file=control['h_log'],flush=True)
sys.exit()
return delta_causality
def cal_dc_diagonal(control):
os.chdir(control['dc_directory'])
dc_mat=read_impurity_mat_static(control,control['dc_directory']+'/dc_mat.dat')
h=open('./dc.dat', 'w')
for ii in sorted(set(control['impurity_problem_equivalence'])):
dc_vec=imp_from_mat_to_array(dc_mat[str(ii)],imp[str(abs(ii))]['impurity_matrix'])
for jj in range(len(dc_vec)):
h.write(str(np.real(dc_vec[jj]))+' '+str(np.imag(dc_vec[jj]))+' ')
h.close()
if (control['method']=='lqsgw+dmft'):
iter_string='_'+str(control['iter_num_impurity'])
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
labeling_file('./dc.dat', iter_string)
print('dc.dat generation done', file=control['h_log'],flush=True)
os.chdir(control['top_dir'])
return None
# def cal_dc_diagonal_new(control):
# os.chdir(control['dc_directory'])
# dc_mat=read_impurity_mat_static(control,control['dc_directory']+'/dc_mat.dat')
# h=open('./dc.dat', 'w')
# for ii in sorted(set(control['impurity_problem_equivalence'])):
# dc_vec=imp_from_mat_to_array(dc_mat[str(ii)],imp[str(abs(ii))]['impurity_matrix'])
# for jj in range(len(dc_vec)):
# h.write(str(np.real(dc_vec[jj]))+' '+str(np.imag(dc_vec[jj]))+' ')
# h.close()
# if (control['method']=='lqsgw+dmft'):
# iter_string='_'+str(control['iter_num_impurity'])
# elif (control['method']=='lda+dmft'):
# iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
# labeling_file('./dc.dat', iter_string)
# print('dc.dat generation done', file=control['h_log'],flush=True)
# os.chdir(control['top_dir'])
# return None
def cal_zinv_m1_diagonal(control):
os.chdir(control['dc_directory'])
if os.path.isfile(control['dc_directory']+'/zinv_m1_mat.dat'):
zinv_m1_mat=read_impurity_mat_static(control,control['dc_directory']+'/zinv_m1_mat.dat')
h=open('./zinv_m1.dat', 'w')
for ii in sorted(set(control['impurity_problem_equivalence'])):
zinv_m1_vec=imp_from_mat_to_array(zinv_m1_mat[str(ii)],imp[str(abs(ii))]['impurity_matrix'])
for jj in range(len(zinv_m1_vec)):
h.write(str(np.real(zinv_m1_vec[jj]))+' '+str(np.imag(zinv_m1_vec[jj]))+' ')
h.close()
if (control['method']=='lqsgw+dmft'):
iter_string='_'+str(control['iter_num_impurity'])
elif (control['method']=='lda+dmft'):
iter_string='_'+str(control['iter_num_outer'])+'_'+str(control['iter_num_impurity'])
labeling_file('./zinv_m1.dat', iter_string)
print('zinv_m1.dat generation done', file=control['h_log'],flush=True)
os.chdir(control['top_dir'])
return None
def vec_from_mat_dynamic(mat,trans):
    # mat has shape (n_omega, norb, norb); return, for every frequency, the
    # diagonal of the basis-rotated matrix as an (n_omega, norb) array.
    vec=np.zeros((np.shape(mat)[0], np.shape(mat)[1]), dtype=complex)
    for ii in range(np.shape(mat)[0]):
        vec[ii,:]=np.diag(np.dot(np.transpose(np.conj(trans)), np.dot(mat[ii,:,:], trans)))
    return vec
def prepare_impurity_solver(control,wan_hmat,imp):
# cal_trans_from_patrick(control, imp)
delta=array_impurity_dynamic(control,imp,control['lowh_directory']+'/delta.dat')
write_json_all(control,imp,delta,'hyb.json')
e_imp=generate_mat_from_array_impurity_static(control,imp,control['lowh_directory']+'/e_imp.dat')
trans_basis=read_impurity_mat_static(control,control['lowh_directory']+'/trans_basis.dat')
for key, value in imp.items():
if (not (isinstance(imp[key], dict))):
continue
nimp_orb=len(imp[key]['impurity_matrix'])
os.chdir(control['impurity_directory']+'/'+key)
if (control['spin_orbit']):
ndim=nimp_orb
e_imp_key=np.zeros((ndim, ndim))
trans_key=np.zeros((ndim, ndim))
# equivalence_key=np.zeros((ndim,ndim),dtype='int')
e_imp_key=np.real(e_imp[key])
trans_key=np.real(trans_basis[key])
# equivalence_key=array([[(lambda ii: str(ii) if str(ii)!='0' else '')(ii) for ii in row] for row in imp[key]['impurity_matrix']])
equivalence_key=list(map(lambda row: list(map(lambda x: str(x) if x!='0' else '', list(map(str, row)))), imp[key]['impurity_matrix']))
else:
ndim=nimp_orb*2
e_imp_key=np.zeros((ndim, ndim))
trans_key=np.zeros((ndim, ndim))
equivalence_key_int_mat=np.array(imp[key]['impurity_matrix'])
            equivalence_key_int_mat_all=np.zeros((ndim, ndim),dtype='int')