prompt
stringlengths 19
879k
| completion
stringlengths 3
53.8k
| api
stringlengths 8
59
|
---|---|---|
#!/usr/bin/env python3
import sys
import os
import shutil
import cv2
from ament_index_python import get_package_share_directory
import yaml
import numpy as np
from transforms3d.euler import euler2mat, mat2euler
from subprocess import check_output, run
from copy import deepcopy
import xacro
from urdf_parser_py.urdf import URDF
marine_ppt = get_package_share_directory('marine_presenter')
def add_icon(img, x, y, w):
    """Stamp the shared video icon onto the slide image at ``img``, in place.

    The anchor point is given in percent of the slide size: ``x``/``y``
    locate the icon's top-right corner, ``w`` is the icon width as a
    percentage of the slide width. The file is overwritten on disk.
    """
    slide = cv2.imread(img)
    height, width = slide.shape[:2]
    right_edge = int(width * x / 100.)
    top_edge = int(height * y / 100.)
    w /= 100.
    icon = cv2.imread(marine_ppt + '/objects/video.png')
    resize_factor = w * width / icon.shape[1]
    icon = cv2.resize(icon, None, fx=resize_factor, fy=resize_factor,
                      interpolation=cv2.INTER_LANCZOS4)
    # Paste the icon so its right edge sits at the requested x position.
    slide[top_edge:top_edge + icon.shape[0],
          right_edge - icon.shape[1]:right_edge] = icon
    cv2.imwrite(img, slide)
def add_borders(img, w = 20):
    """Pad the image at ``img`` with white horizontal margins, in place.

    NOTE(review): the total pad is ``int(W*(1 + w/100))`` — i.e. the full
    image width plus ``w`` percent, split evenly left/right. Confirm this is
    intended rather than ``int(W*w/100)``.
    """
    original = cv2.imread(img)
    height, width = original.shape[:2]
    pad = int(width * (1 + w / 100.))
    framed = np.full((height, width + pad, 3), 255, dtype=np.uint8)
    # Center the original between the two white margins.
    framed[:, pad // 2:width + pad // 2] = original
    cv2.imwrite(img, framed)
# get initial pdf
# The presentation PDF is the first CLI argument; append '.pdf' when missing
# (the replace() guards against 'name..pdf' when the argument ended in '.').
filename = sys.argv[1]
if filename[-4:] != '.pdf':
    filename = (filename+'.pdf').replace('..','.')
if not os.path.exists(filename):
    print(filename + ' does not exist, exiting')
    sys.exit(0)
# check config file
# An optional YAML file with the same basename overrides presentation options.
if os.path.exists(filename.replace('.pdf', '.yaml')):
    config = yaml.safe_load(open(filename.replace('.pdf', '.yaml')))
else:
    config = {}
# Default video-icon placement, in percent of the slide size (see add_icon).
video_x = 99.5
video_y = 0.5
video_w = 3
if 'video' in config:
    video_x = config['video']['x']
    video_y = config['video']['y']
    video_w = config['video']['w']
# Global scene scale factor; defaults to 3 when the config does not set it.
if 'scale' not in config:
    config['scale'] = 3
scale = config['scale']
ext = 'png'
cam_pose = [1, 0, scale, 0, 0, 0] # wrt current slide
def dict_replace(s, d):
    """Return ``s`` with every key of ``d`` substituted by its mapped value.

    Substitutions are applied one after another in the dict's iteration
    order, so a later replacement can rewrite text produced by an earlier one.
    """
    for pattern, replacement in d.items():
        s = s.replace(pattern, replacement)
    return s
def read_pose(pose, base_pose = (0,0,0,0,0,0)):
    """Normalize a pose read from the YAML config into 6 components.

    ``pose`` may be a dict (optionally nesting the actual pose under a
    'pose' key), or a sequence of up to 6 values (x, y, z, r, p, y).
    Missing trailing components are completed from ``base_pose``.
    The z component (index 2) is multiplied by the module-level ``scale``
    when present, mutating the caller's sequence in place (pre-existing
    behavior, kept for compatibility).
    """
    if type(pose) == dict:
        if 'pose' in pose:
            return read_pose(pose['pose'], base_pose)
        # Dict without an explicit pose: fall back to the base pose.
        return base_pose
    if len(pose) >= 3:
        pose[2] *= scale
    if len(pose) == 6:
        return pose
    # complete with base_pose
    # BUG FIX: pose is typically a list (from YAML) while base_pose is a
    # tuple; list + tuple raises TypeError. Normalize both sides to lists.
    return list(pose) + list(base_pose[len(pose)-6:])
def Homogeneous(pose):
R = euler2mat(pose[3], pose[4], pose[5], 'rxyz')
t = | np.array(pose[:3]) | numpy.array |
from typing import Optional
import gym
import numpy as np
from jax_rl.datasets.dataset import Dataset
import pandas as pd
from .welford import Welford
from .equivariant_standardization import EquivStandardizer
import abc
import collections
import numpy as np
# Transition tuple stored in, and sampled from, the replay buffer.
Batch = collections.namedtuple(
    'Batch',
    ['observations', 'actions', 'rewards', 'masks', 'next_observations'])
class ReplayBuffer(Dataset):
def __init__(self, observation_space: gym.spaces.Box, action_dim: int,
capacity: int,rep,state_transform,inv_state_transform,standardize=False):
observations = np.empty((capacity, *observation_space.shape),
dtype=observation_space.dtype)
actions = np.empty((capacity, action_dim), dtype=np.float32)
rewards = np.empty((capacity, ), dtype=np.float32)
masks = np.empty((capacity, ), dtype=np.float32)
next_observations = np.empty((capacity, *observation_space.shape),
dtype=observation_space.dtype)
super().__init__(observations=observations,
actions=actions,
rewards=rewards,
masks=masks,
next_observations=next_observations,
size=0)
self.size = 0
self.insert_index = 0
self.capacity = capacity
self.restarts = | np.zeros(capacity) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 8 21:39:07 2022
@author: rainn
"""
import numpy as np
from PIL import Image
# from https://stackoverflow.com/questions/34913005/color-space-mapping-ycbcr-to-rgb
def ycbcr2rgb(im):
xform = | np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]]) | numpy.array |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.convolution import Gaussian2DKernel
import astropy.units as u
from ...utils.testing import requires_dependency, requires_data, mpl_plot_check
from ...cube import PSFKernel
from ...irf import EnergyDependentMultiGaussPSF
from ..utils import fill_poisson
from ..geom import MapAxis, MapCoord, coordsys_to_frame
from ..base import Map
from ..wcs import WcsGeom
from ..hpx import HpxGeom
from ..wcsnd import WcsNDMap
pytest.importorskip("reproject")
# Non-spatial axes used to parametrize the map geometries exercised below.
axes1 = [MapAxis(np.logspace(0.0, 3.0, 3), interp="log", name="spam")]
axes2 = [
    MapAxis(np.logspace(0.0, 3.0, 3), interp="log"),
    MapAxis(np.logspace(1.0, 3.0, 4), interp="lin"),
]
skydir = SkyCoord(110.0, 75.0, unit="deg", frame="icrs")
# Geometry parameter tuples: (npix, binsz, coordsys, proj, skydir, axes).
# npix=None produces the all-sky variants.
wcs_allsky_test_geoms = [
    (None, 10.0, "GAL", "AIT", skydir, None),
    (None, 10.0, "GAL", "AIT", skydir, axes1),
    (None, [10.0, 20.0], "GAL", "AIT", skydir, axes1),
    (None, 10.0, "GAL", "AIT", skydir, axes2),
    (None, [[10.0, 20.0, 30.0], [10.0, 20.0, 30.0]], "GAL", "AIT", skydir, axes2),
]
wcs_partialsky_test_geoms = [
    (10, 1.0, "GAL", "AIT", skydir, None),
    (10, 1.0, "GAL", "AIT", skydir, axes1),
    (10, [1.0, 2.0], "GAL", "AIT", skydir, axes1),
    (10, 1.0, "GAL", "AIT", skydir, axes2),
    (10, [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]], "GAL", "AIT", skydir, axes2),
]
wcs_test_geoms = wcs_allsky_test_geoms + wcs_partialsky_test_geoms
@pytest.mark.parametrize(
    ("npix", "binsz", "coordsys", "proj", "skydir", "axes"), wcs_test_geoms
)
def test_wcsndmap_init(npix, binsz, coordsys, proj, skydir, axes):
    """A map constructed from another map's data must preserve that data."""
    geom = WcsGeom.create(
        npix=npix, binsz=binsz, proj=proj, coordsys=coordsys, axes=axes
    )
    m0 = WcsNDMap(geom)
    coords = m0.geom.get_coord()
    m0.set_by_coord(coords, coords[1])
    # Re-wrap the filled data array in a fresh map: contents must be identical.
    m1 = WcsNDMap(geom, m0.data)
    assert_allclose(m0.data, m1.data)
@pytest.mark.parametrize(
    ("npix", "binsz", "coordsys", "proj", "skydir", "axes"), wcs_test_geoms
)
def test_wcsndmap_read_write(tmpdir, npix, binsz, coordsys, proj, skydir, axes):
    """Round-trip a Poisson-filled map through FITS in several variants."""
    geom = WcsGeom.create(
        npix=npix, binsz=binsz, proj=proj, coordsys=coordsys, axes=axes
    )
    filename = str(tmpdir / "map.fits")
    m0 = WcsNDMap(geom)
    fill_poisson(m0, mu=0.5)
    m0.write(filename, overwrite=True)
    # Reading via the concrete class, the Map factory, and an explicit
    # map_type must all reproduce the written data.
    m1 = WcsNDMap.read(filename)
    m2 = Map.read(filename)
    m3 = Map.read(filename, map_type="wcs")
    assert_allclose(m0.data, m1.data)
    assert_allclose(m0.data, m2.data)
    assert_allclose(m0.data, m3.data)
    # Same round trip using the sparse serialization format.
    m0.write(filename, sparse=True, overwrite=True)
    m1 = WcsNDMap.read(filename)
    m2 = Map.read(filename)
    m3 = Map.read(filename, map_type="wcs")
    assert_allclose(m0.data, m1.data)
    assert_allclose(m0.data, m2.data)
    assert_allclose(m0.data, m3.data)
    # Specify alternate HDU name for IMAGE and BANDS table
    # NOTE(review): no data assertions after this round trip — only that the
    # three reads complete without error.
    m0.write(filename, hdu="IMAGE", hdu_bands="TEST", overwrite=True)
    m1 = WcsNDMap.read(filename)
    m2 = Map.read(filename)
    m3 = Map.read(filename, map_type="wcs")
def test_wcsndmap_read_write_fgst(tmpdir):
    """Write maps in Fermi-ST conventions and check the expected HDUs exist."""
    filename = str(tmpdir / "map.fits")
    axis = MapAxis.from_bounds(100.0, 1000.0, 4, name="energy", unit="MeV")
    geom = WcsGeom.create(npix=10, binsz=1.0, proj="AIT", coordsys="GAL", axes=[axis])
    # Test Counts Cube
    m = WcsNDMap(geom)
    m.write(filename, conv="fgst-ccube", overwrite=True)
    # The counts-cube convention stores the energy binning in an EBOUNDS HDU.
    with fits.open(filename) as h:
        assert "EBOUNDS" in h
    m2 = Map.read(filename)
    assert m2.geom.conv == "fgst-ccube"
    # Test Model Cube
    # The template convention stores energies in an ENERGIES HDU instead.
    m.write(filename, conv="fgst-template", overwrite=True)
    with fits.open(filename) as h:
        assert "ENERGIES" in h
    m2 = Map.read(filename)
    assert m2.geom.conv == "fgst-template"
def test_wcs_nd_map_data_transpose_issue(tmpdir):
    # Regression test for https://github.com/gammapy/gammapy/issues/1346
    # Our test case: a little map with WCS shape (3, 2), i.e. numpy array shape (2, 3)
    # Non-finite values (nan/inf) are included to stress serialization.
    data = np.array([[0, 1, 2], [np.nan, np.inf, -np.inf]])
    geom = WcsGeom.create(npix=(3, 2))
    # Data should be unmodified after init
    m = WcsNDMap(data=data, geom=geom)
    assert_equal(m.data, data)
    # Data should be unmodified if initialised like this
    m = WcsNDMap(geom=geom)
    # and then filled via an in-place Numpy array operation
    m.data += data
    assert_equal(m.data, data)
    # Data should be unmodified after write / read to normal image format
    filename = str(tmpdir / "normal.fits.gz")
    m.write(filename)
    assert_equal(Map.read(filename).data, data)
    # Data should be unmodified after write / read to sparse image format
    filename = str(tmpdir / "sparse.fits.gz")
    m.write(filename)
    assert_equal(Map.read(filename).data, data)
@pytest.mark.parametrize(
    ("npix", "binsz", "coordsys", "proj", "skydir", "axes"), wcs_test_geoms
)
def test_wcsndmap_set_get_by_pix(npix, binsz, coordsys, proj, skydir, axes):
    """set_by_pix followed by get_by_pix must return the stored values."""
    geom = WcsGeom.create(
        npix=npix, binsz=binsz, skydir=skydir, proj=proj, coordsys=coordsys, axes=axes
    )
    m = WcsNDMap(geom)
    coords = m.geom.get_coord()
    pix = m.geom.get_idx()
    # Store the longitude coordinate of each pixel as its value.
    m.set_by_pix(pix, coords[0])
    assert_allclose(coords[0], m.get_by_pix(pix))
@pytest.mark.parametrize(
    ("npix", "binsz", "coordsys", "proj", "skydir", "axes"), wcs_test_geoms
)
def test_wcsndmap_set_get_by_coord(npix, binsz, coordsys, proj, skydir, axes):
    """set/get_by_coord round trips with tuples, SkyCoords and MapCoords."""
    geom = WcsGeom.create(
        npix=npix, binsz=binsz, skydir=skydir, proj=proj, coordsys=coordsys, axes=axes
    )
    m = WcsNDMap(geom)
    coords = m.geom.get_coord()
    m.set_by_coord(coords, coords[0])
    assert_allclose(coords[0], m.get_by_coord(coords))
    if not geom.is_allsky:
        # Force the latitudes off the partial-sky footprint; lookups there
        # must return NaN.
        coords[1][...] = 0.0
        assert_allclose(np.nan * np.ones(coords[0].shape), m.get_by_coord(coords))
    # Test with SkyCoords
    m = WcsNDMap(geom)
    coords = m.geom.get_coord()
    skydir = SkyCoord(
        coords[0], coords[1], unit="deg", frame=coordsys_to_frame(geom.coordsys)
    )
    skydir_cel = skydir.transform_to("icrs")
    skydir_gal = skydir.transform_to("galactic")
    m.set_by_coord((skydir_gal,) + tuple(coords[2:]), coords[0])
    assert_allclose(coords[0], m.get_by_coord(coords))
    # The same positions expressed in either frame must read identically.
    assert_allclose(
        m.get_by_coord((skydir_cel,) + tuple(coords[2:])),
        m.get_by_coord((skydir_gal,) + tuple(coords[2:])),
    )
    # Test with MapCoord
    m = WcsNDMap(geom)
    coords = m.geom.get_coord()
    coords_dict = dict(lon=coords[0], lat=coords[1])
    if axes:
        for i, ax in enumerate(axes):
            coords_dict[ax.name] = coords[i + 2]
    map_coords = MapCoord.create(coords_dict, coordsys=coordsys)
    m.set_by_coord(map_coords, coords[0])
    assert_allclose(coords[0], m.get_by_coord(map_coords))
def test_set_get_by_coord_quantities():
    """Quantities with compatible units must address the same map bin."""
    ax = MapAxis(np.logspace(0.0, 3.0, 3), interp="log", name="energy", unit="TeV")
    geom = WcsGeom.create(binsz=0.1, npix=(3, 4), axes=[ax])
    m = WcsNDMap(geom)
    coords_dict = {"lon": 0, "lat": 0, "energy": 1000 * u.GeV}
    m.set_by_coord(coords_dict, 42)
    # 1000 GeV == 1 TeV: the read must hit the bin written above.
    coords_dict["energy"] = 1 * u.TeV
    assert_allclose(42, m.get_by_coord(coords_dict))
@pytest.mark.parametrize(
    ("npix", "binsz", "coordsys", "proj", "skydir", "axes"), wcs_test_geoms
)
def test_wcsndmap_fill_by_coord(npix, binsz, coordsys, proj, skydir, axes):
    """fill_by_coord accumulates: duplicated coords deposit twice."""
    geom = WcsGeom.create(
        npix=npix, binsz=binsz, skydir=skydir, proj=proj, coordsys=coordsys, axes=axes
    )
    m = WcsNDMap(geom)
    coords = m.geom.get_coord()
    # Every coordinate appears twice, so each bin receives its value twice.
    fill_coords = tuple([np.concatenate((t, t)) for t in coords])
    fill_vals = fill_coords[1]
    m.fill_by_coord(fill_coords, fill_vals)
    assert_allclose(m.get_by_coord(coords), 2.0 * coords[1])
    # Test with SkyCoords
    m = WcsNDMap(geom)
    coords = m.geom.get_coord()
    skydir = SkyCoord(
        coords[0], coords[1], unit="deg", frame=coordsys_to_frame(geom.coordsys)
    )
    skydir_cel = skydir.transform_to("icrs")
    skydir_gal = skydir.transform_to("galactic")
    fill_coords_cel = (skydir_cel,) + tuple(coords[2:])
    fill_coords_gal = (skydir_gal,) + tuple(coords[2:])
    # Filling once per frame must equal filling twice in a single frame.
    m.fill_by_coord(fill_coords_cel, coords[1])
    m.fill_by_coord(fill_coords_gal, coords[1])
    assert_allclose(m.get_by_coord(coords), 2.0 * coords[1])
@pytest.mark.parametrize(
    ("npix", "binsz", "coordsys", "proj", "skydir", "axes"), wcs_test_geoms
)
def test_wcsndmap_coadd(npix, binsz, coordsys, proj, skydir, axes):
    """Coadding a finer (upsampled) map preserves the total map content."""
    geom = WcsGeom.create(
        npix=npix, binsz=binsz, skydir=skydir, proj=proj, coordsys=coordsys, axes=axes
    )
    m0 = WcsNDMap(geom)
    m1 = WcsNDMap(geom.upsample(2))
    coords = m0.geom.get_coord()
    # Deposit each coarse-bin value twice into the upsampled map.
    m1.fill_by_coord(
        tuple([np.concatenate((t, t)) for t in coords]),
        np.concatenate((coords[1], coords[1])),
    )
    m0.coadd(m1)
    # Coaddition onto the coarser grid must conserve the summed content.
    assert_allclose(np.nansum(m0.data), np.nansum(m1.data), rtol=1e-4)
@pytest.mark.parametrize(
    ("npix", "binsz", "coordsys", "proj", "skydir", "axes"), wcs_test_geoms
)
def test_wcsndmap_interp_by_coord(npix, binsz, coordsys, proj, skydir, axes):
    """Interpolation at bin centers reproduces the stored values."""
    geom = WcsGeom.create(
        npix=npix, binsz=binsz, skydir=skydir, proj=proj, coordsys=coordsys, axes=axes
    )
    m = WcsNDMap(geom)
    coords = m.geom.get_coord(flat=True)
    m.set_by_coord(coords, coords[1])
    assert_allclose(coords[1], m.interp_by_coord(coords, interp="nearest"))
    assert_allclose(coords[1], m.interp_by_coord(coords, interp="linear"))
    # interp=1 is the numeric spelling of linear interpolation here.
    assert_allclose(coords[1], m.interp_by_coord(coords, interp=1))
    # NOTE(review): cubic is only exercised on regular, partial-sky
    # geometries — presumably unsupported otherwise; confirm.
    if geom.is_regular and not geom.is_allsky:
        assert_allclose(coords[1], m.interp_by_coord(coords, interp="cubic"))
def test_interp_by_coord_quantities():
ax = MapAxis(
| np.logspace(0.0, 3.0, 3) | numpy.logspace |
# first to start the nameserver start: python -m Pyro4.naming
import time
from threading import Thread
import numpy as np
import Pyro4
from rlkit.launchers import conf as config
Pyro4.config.SERIALIZERS_ACCEPTED = set(["pickle", "json", "marshal", "serpent"])
Pyro4.config.SERIALIZER = "pickle"
# Latest state published by the space-mouse driver. Kept module-wide so the
# Pyro4-exposed object and local readers observe the same value.
device_state = None
@Pyro4.expose
class DeviceState(object):
    """Remote-accessible holder for the shared space-mouse state."""
    # NOTE(review): this class attribute is never read or written; the state
    # actually lives in the module-level ``device_state`` above.
    state = None
    def get_state(self):
        # Returns the module-level state, not the instance attribute.
        return device_state
    def set_state(self, state):
        global device_state
        device_state = state
class SpaceMouseExpert:
    def __init__(
        self,
        xyz_dims=3,
        xyz_remap=[0, 1, 2],
        xyz_scale=[1, 1, 1],
        xyz_abs_threshold=0.0,
        rot_dims=3,
        rot_remap=[0, 1, 2],
        rot_scale=[1, 1, 1],
        rot_abs_threshold=0.0,
        rot_discrete=False,
        min_clip=-np.inf,
        max_clip=np.inf,
    ):
        """Configure the expert and launch the device-state server thread.

        NOTE(review): the list defaults (xyz_remap, xyz_scale, rot_remap,
        rot_scale) are mutable default arguments; they appear to be read-only
        here, but confirm no caller mutates them.

        :param xyz_dims: number of translation dimensions used for actions
        :param xyz_remap: index permutation applied to the translation axes
        :param xyz_scale: per-axis scaling applied after thresholding
        :param xyz_abs_threshold: translations below this magnitude are zeroed
        :param rot_*: analogous settings for the rotation channels
        :param min_clip/max_clip: clipping range applied to translations
        """
        self.xyz_dims = xyz_dims
        self.xyz_remap = np.array(xyz_remap)
        self.xyz_scale = np.array(xyz_scale)
        self.xyz_abs_threshold = xyz_abs_threshold
        self.rot_dims = rot_dims
        self.rot_remap = rot_remap
        self.rot_scale = rot_scale
        self.rot_abs_threshold = rot_abs_threshold
        self.rot_discrete = rot_discrete
        self.min_clip = min_clip
        self.max_clip = max_clip
        # Serve the device state in the background; a daemon thread dies with
        # the process.
        # NOTE(review): ``start_server`` is not defined in this view —
        # confirm it is provided elsewhere in the module.
        self.thread = Thread(target=start_server)
        self.thread.daemon = True
        self.thread.start()
        self.device_state = DeviceState()
def get_action(self, obs):
"""Must return (action, valid, reset, accept)"""
state = self.device_state.get_state()
# time.sleep(0.1)
if state is None:
return None, False, False, False
dpos, rotation, roll, pitch, yaw, accept, reset = (
state["dpos"],
state["rotation"],
state["roll"],
state["pitch"],
state["yaw"],
state["grasp"], # ["left_click"],
state["reset"], # ["right_click"],
)
xyz = dpos[self.xyz_remap]
xyz[np.abs(xyz) < self.xyz_abs_threshold] = 0.0
xyz = xyz * self.xyz_scale
xyz = np.clip(xyz, self.min_clip, self.max_clip)
rot = np.array([roll, pitch, yaw])
rot[ | np.abs(rot) | numpy.abs |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Name: NetPanelAnalysis
Function: 计算柔性防护系统中任意四边形钢丝绳网片顶破力、顶破位移、耗能能力
Note: 国际单位制
Version: 1.2.1
Author: <NAME>
Date: from 2021/8/31 to
命名方式:以平行于x方向及y方向分别作为后缀
Remark: 尚未解决的问题:
(1)考虑矩形之外的网孔形状
(2)考虑柔性边界刚度
'''
import numpy as np
from userfunc_NPA import *
######################################################################################################################################################
# 本部分代码用于校准另一种方法
def func_cablenet_xyz(theta, H, w, Rp, Rs, a, m):
i_arr = np.arange(1,m+0.1,step=1)
xP_arr = a/2*(2*i_arr - m - 1)
yP_arr = np.sqrt(Rp**2 - xP_arr**2)
zP_arr = H*np.ones_like(xP_arr)
theta_1 = np.arcsin(xP_arr[-1]/(w/np.sqrt(2)))
theta_2 = np.arccos(xP_arr[-1]/(w/np.sqrt(2)))
if theta>=0 and theta<theta_1:
m1 = int(m/2 - 1/2*func_round(np.sqrt(2)*w*np.sin(theta)/a))
i1_arr = np.arange(1,m1+0.1,step=1)
i2_arr = np.arange(m1+1,m+0.1,step=1)
yQ1_arr = w/np.sqrt(2)*np.cos(theta) - abs(xP_arr[0] +w/np.sqrt(2)*np.sin(theta))*np.tan(np.pi/4+theta) + a*(i1_arr-1)*np.tan(np.pi/4+theta)
yQ2_arr = w/np.sqrt(2)*np.cos(theta) - abs(xP_arr[m1]+w/np.sqrt(2)*np.sin(theta))*np.tan(np.pi/4-theta) - a*(i2_arr-m1-1)*np.tan(np.pi/4-theta)
xQ_arr = xP_arr
yQ_arr = np.concatenate((yQ1_arr,yQ2_arr))
zQ_arr = np.zeros_like(xP_arr)
elif theta>=theta_1 and theta<=theta_2:
xQ_arr = xP_arr
yQ_arr = w/np.sqrt(2)*np.cos(theta) - abs(xP_arr[0] +w/np.sqrt(2)*np.sin(theta))*np.tan(np.pi/4-theta) - a*(i_arr-1)*np.tan(np.pi/4-theta)
zQ_arr = np.zeros_like(xP_arr)
elif theta>theta_2 and theta<np.pi/2:
m1 = m/2 - 1/2*func_round(np.sqrt(2)*w*np.cos(theta)/a)
i1_arr = np.arange(1,m1+0.1,step=1)
i2_arr = np.arange(m1+1,m+0.1,step=1)
yQ1_arr = w/np.sqrt(2)*np.sin(theta) - abs(xP_arr[0] -w/np.sqrt(2)* | np.cos(theta) | numpy.cos |
# coding=utf-8
# Copyright 2021 The init2winit Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for CurvatureEvaluator class.
"""
import os
import shutil
import tempfile
from absl.testing import absltest
from flax import jax_utils
from flax import nn
from flax import optim
from init2winit import checkpoint
from init2winit import hyperparameters
from init2winit import trainer
from init2winit.dataset_lib import datasets
from init2winit.hessian import hessian_eval
from init2winit.hessian import run_lanczos
from init2winit.init_lib import initializers
from init2winit.model_lib import models
from jax.flatten_util import ravel_pytree
import jax.numpy as jnp
import jax.random
import numpy as np
import tensorflow.compat.v1 as tf # importing this is needed for tfds mocking.
import tensorflow_datasets as tfds
# Configuration dict consumed by hessian_eval.CurvatureEvaluator and
# run_lanczos.eval_checkpoints in the tests below.
CONFIG = {
    'num_batches': 25,
    'rng_key': 0,
    'use_training_gen': True,
    'update_stats': True,
    'num_points': 20,
    'num_eval_draws': 6,
    'compute_stats': True,
    'lower_thresh': -0.1,
    'upper_thresh': 0.1,
    'name': 'stats',
    'eval_hessian': True,
    'eval_gradient_covariance': True,
    'compute_interps': True,
    'num_lanczos_steps': 40,
    'hparam_overrides': {},
    'average_hosts': True,
    'num_eigens': 3}
def _batch_square_loss(flax_module, batch):
"""Helper function to compute square loss of model on the given batch.
The function computes frac{1}{B} sum_{i=1}^B (y - hat{y})^2 where B is the
batch-size.
Args:
flax_module: The flax module representing the model.
batch: A dictionary with keys 'inputs' and 'targets'.
Returns:
total_loss: The loss averaged over the batch.
"""
batch, rng = batch
del rng
batch_size = batch['targets'].shape[0]
preds = flax_module(batch['inputs']).reshape((batch_size, -1))
batch_targets = batch['targets'].reshape((batch_size, -1))
square_loss = jnp.mean(jnp.sum(jnp.square(preds - batch_targets), axis=1))
total_loss = square_loss
return total_loss
class LinearModel(nn.Module):
    """Defines a simple linear model for the purpose of testing.

    The model assumes the input data has shape
    [batch_size_per_device, feature_dim]. The model flatten the input before
    applying a dense layer.
    """
    # Uses the deprecated pre-Linen flax.nn Module API: ``apply`` defines the
    # forward pass and hyperparameters arrive as keyword arguments.
    def apply(self, x, num_outputs):
        # Flatten everything except the leading batch dimension.
        x = jnp.reshape(x, (x.shape[0], -1))
        # Bias-free dense layer, so the model is exactly x @ kernel.
        x = nn.Dense(x, features=num_outputs, bias=False)
        return x
def _get_synth_data(num_examples, dim, num_outputs, batch_size):
    """Generates a fake data class for testing.

    Returns a tuple ``(SynthData, feature, y)`` where ``SynthData`` exposes
    a ``train_iterator_fn`` yielding {'inputs', 'targets'} batches over the
    generated arrays. Uses the global ``np.random`` state; callers seed it.
    """
    # Scale the first num_eigens features so the data covariance (and hence
    # the square-loss Hessian) has a few dominant eigen-directions.
    hess = np.ones((1, dim))
    hess[0, :CONFIG['num_eigens']] += np.arange(CONFIG['num_eigens'])
    feature = np.random.normal(size=(num_examples, dim)) / np.sqrt(dim)
    feature = np.multiply(feature, hess)
    feature = feature.astype(np.float32)
    # Targets are Gaussian noise drawn independently of the features.
    y = np.random.normal(size=(num_examples, num_outputs))
    y = y.astype(np.float32)
    class SynthData(object):
        # Minimal stand-in for a dataset object: only the train iterator.
        def train_iterator_fn(self):
            for ind in range(0, num_examples, batch_size):
                batch = {'inputs': feature[ind:ind + batch_size, :],
                         'targets': y[ind:ind + batch_size, :]}
                yield batch
    return SynthData, feature, y
def _to_vec(pytree):
"""Helper function that converts a pytree to a n-by-1 vector."""
vec, _ = ravel_pytree(pytree)
n = len(vec)
vec = vec.reshape((n, 1))
return vec
def _quad_grad(x, y, beta):
"""Computes the gradient of a linear model with square loss."""
num_obs = x.shape[0]
assert len(y.shape) == 2 and y.shape[0] == num_obs
exact_grad = - np.dot(x.T, y) + np.dot(x.T, np.dot(x, beta))
exact_grad = 2 * exact_grad / num_obs
return exact_grad
class TrainerTest(absltest.TestCase):
"""Tests examining the CurvatureEvaluator class."""
    def setUp(self):
        """Build a linear model, synthetic data and a CurvatureEvaluator."""
        super(TrainerTest, self).setUp()
        self.test_dir = tempfile.mkdtemp()
        rng = jax.random.PRNGKey(0)
        # Seed numpy so the synthetic data below is deterministic.
        np.random.seed(0)
        self.feature_dim = 100
        num_outputs = 1
        self.batch_size = 32
        num_examples = 2048
        def create_model(key):
            # Old flax.nn API: bind hyperparameters, then initialize by shape.
            module = LinearModel.partial(num_outputs=num_outputs)
            _, init = module.init_by_shape(key,
                                           [((self.batch_size, self.feature_dim),
                                             jnp.float32)])
            model = nn.Model(module, init)
            return model
        model = create_model(rng)
        # Linear model coefficients
        self.beta = model.params['Dense_0']['kernel']
        self.beta = self.beta.reshape((self.feature_dim, 1))
        self.beta = self.beta.astype(np.float32)
        self.optimizer = optim.GradientDescent(learning_rate=1.0).create(model)
        self.optimizer = jax_utils.replicate(self.optimizer)
        data_class, self.feature, self.y = _get_synth_data(num_examples,
                                                           self.feature_dim,
                                                           num_outputs,
                                                           self.batch_size)
        self.evaluator = hessian_eval.CurvatureEvaluator(
            self.optimizer.target,
            CONFIG,
            data_class(),
            _batch_square_loss)
        # Computing the exact full-batch quantities from the linear model
        num_obs = CONFIG['num_batches'] * self.batch_size
        xb = self.feature[:num_obs, :]
        yb = self.y[:num_obs, :]
        self.fb_grad = _quad_grad(xb, yb, self.beta)
        # Exact Hessian of the mean square loss: (2/n) * X^T X.
        self.hessian = 2 * np.dot(xb.T, xb) / num_obs
    def tearDown(self):
        # Remove the temporary checkpoint directory created in setUp.
        shutil.rmtree(self.test_dir)
        super(TrainerTest, self).tearDown()
    def test_run_lanczos(self):
        """Train briefly on mocked MNIST, then run Lanczos on the checkpoint.

        End-to-end check that run_lanczos.eval_checkpoints processes the
        saved checkpoints and logs one stats row per checkpoint step.
        """
        rng = jax.random.PRNGKey(0)
        # Set the numpy seed to make the fake data deterministc. mocking.mock_data
        # ultimately calls numpy.random.
        np.random.seed(0)
        model_name = 'fully_connected'
        loss_name = 'cross_entropy'
        metrics_name = 'classification_metrics'
        initializer_name = 'noop'
        dataset_name = 'mnist'
        model_cls = models.get_model(model_name)
        initializer = initializers.get_initializer(initializer_name)
        dataset_builder = datasets.get_dataset(dataset_name)
        # Tiny splits keep the test fast.
        hparam_overrides = {
            'lr_hparams': {
                'base_lr': 0.1,
                'schedule': 'cosine'
            },
            'batch_size': 8,
            'train_size': 160,
            'valid_size': 96,
            'test_size': 80,
        }
        hps = hyperparameters.build_hparams(
            model_name,
            initializer_name,
            dataset_name,
            hparam_file=None,
            hparam_overrides=hparam_overrides)
        model = model_cls(hps, datasets.get_dataset_meta_data(dataset_name),
                          loss_name, metrics_name)
        eval_batch_size = 16
        num_examples = 256
        def as_dataset(self, *args, **kwargs):
            # Replacement for tfds' as_dataset: a stream of constant fake
            # MNIST samples (all-ones image, label 9).
            del args
            del kwargs
            # pylint: disable=g-long-lambda,g-complex-comprehension
            return tf.data.Dataset.from_generator(
                lambda: ({
                    'image': np.ones(shape=(28, 28, 1), dtype=np.uint8),
                    'label': 9,
                } for i in range(num_examples)),
                output_types=self.info.features.dtype,
                output_shapes=self.info.features.shape,
            )
        # This will override the tfds.load(mnist) call to return 100 fake samples.
        with tfds.testing.mock_data(
            as_dataset_fn=as_dataset, num_examples=num_examples):
            dataset = dataset_builder(
                shuffle_rng=jax.random.PRNGKey(0),
                batch_size=hps.batch_size,
                eval_batch_size=eval_batch_size,
                hps=hps)
            num_train_steps = 41
            eval_num_batches = 5
            eval_every = 10
            checkpoint_steps = [40]
            # Drain the generator so training runs to completion and the
            # checkpoint at step 40 is written.
            _ = list(
                trainer.train(
                    train_dir=self.test_dir,
                    model=model,
                    dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
                    initializer=initializer,
                    num_train_steps=num_train_steps,
                    hps=hps,
                    rng=rng,
                    eval_batch_size=eval_batch_size,
                    eval_num_batches=eval_num_batches,
                    eval_train_num_batches=eval_num_batches,
                    eval_frequency=eval_every,
                    checkpoint_steps=checkpoint_steps))
        checkpoint_dir = os.path.join(self.test_dir, 'checkpoints')
        rng = jax.random.PRNGKey(0)
        run_lanczos.eval_checkpoints(
            checkpoint_dir,
            hps,
            rng,
            eval_num_batches,
            model_cls=model_cls,
            dataset_builder=lambda *unused_args, **unused_kwargs: dataset,
            dataset_meta_data=datasets.get_dataset_meta_data(dataset_name),
            hessian_eval_config=CONFIG,
        )
        # Load the saved file.
        stats_file = os.path.join(checkpoint_dir, 'stats')
        latest = checkpoint.load_latest_checkpoint(stats_file)
        state_list = latest.pytree if latest else []
        # Test that the logged steps are correct.
        saved_steps = [row['step'] for row in state_list]
        self.assertEqual(saved_steps, checkpoint_steps)
    def test_grads(self):
        """Test the computed gradients using a linear model."""
        dim = self.feature_dim
        bs = self.batch_size
        num_batches = CONFIG['num_batches']
        num_draws = CONFIG['num_eval_draws']
        grads, _ = self.evaluator.compute_dirs(self.optimizer)
        # Assert both full and mini batch gradients are accurate
        for i in range(num_draws + 1):
            dir_vec = _to_vec(grads[i])[:, 0]
            self.assertLen(dir_vec, dim)
            # i == num_draws corresponds to full-batch directions
            if i == num_draws:
                start = 0
                end = num_batches * bs
            else:
                start = i * bs
                end = (i + 1) * bs
            xb = self.feature[start:end, :]
            yb = self.y[start:end, :]
            # Compare against the closed-form gradient on the same slice.
            exact_grad = _quad_grad(xb, yb, self.beta)[:, 0]
            # Max absolute error ...
            add_err = np.max(np.abs(dir_vec - exact_grad))
            self.assertLessEqual(add_err, 1e-6)
            # ... and max elementwise relative error.
            rel_err = np.abs(exact_grad / dir_vec - 1.0)
            rel_err = np.max(rel_err)
            self.assertLessEqual(rel_err, 1e-4)
    def test_statistics(self):
        """Test the computed statistics using a linear model."""
        bs = self.batch_size
        num_batches = CONFIG['num_batches']
        num_draws = CONFIG['num_eval_draws']
        step = 0
        grads, _ = self.evaluator.compute_dirs(self.optimizer)
        # Top Hessian eigenvectors, taken from the closed-form Hessian.
        _, q = np.linalg.eigh(self.hessian)
        evecs = [q[:, -k] for k in range(CONFIG['num_eigens'], 0, -1)]
        q = q[:, -CONFIG['num_eigens']:]
        stats_row = self.evaluator.evaluate_stats(self.optimizer.target, grads, [],
                                                  evecs, [], step)
        # Assert that the statistics are exact
        for i in range(num_draws + 1):
            # i == num_draws is the full-batch slice, otherwise minibatch i.
            if i == num_draws:
                start = 0
                end = num_batches * bs
            else:
                start = i * bs
                end = (i + 1) * bs
            xb = self.feature[start:end, :]
            yb = self.y[start:end, :]
            exact_grad = _quad_grad(xb, yb, self.beta)
            # Inner product of minibatch gradient with full-batch gradient.
            exact_overlap = np.sum(np.multiply(exact_grad, self.fb_grad))
            overlap = stats_row['overlap%d'%(i,)]
            self.assertAlmostEqual(exact_overlap, overlap, places=5)
            # Squared gradient norm.
            exact_norm = np.linalg.norm(exact_grad) ** 2
            norm = stats_row['norm%d'%(i,)]
            self.assertAlmostEqual(exact_norm, norm, places=5)
            # Quadratic form g^T H g.
            exact_quad = np.dot(exact_grad.T, np.dot(self.hessian, exact_grad))[0, 0]
            quad = stats_row['quad%d'%(i,)]
            self.assertAlmostEqual(exact_quad, quad, places=5)
            # Same quadratic form for the gradient noise (minibatch - full).
            noise = exact_grad - self.fb_grad
            exact_quad = np.dot(noise.T, np.dot(self.hessian, noise))[0, 0]
            quad = stats_row['quad_noise%d'%(i,)]
            self.assertAlmostEqual(exact_quad, quad, places=5)
            # Projections of the normalized gradient onto top eigenvectors.
            inner_prods = np.dot(q.T,
                                 exact_grad / np.linalg.norm(exact_grad)).flatten()
            err = np.max(np.abs(inner_prods - stats_row['hTg'][:, i]))
            self.assertAlmostEqual(err, 0.0, places=4)
def test_interpolation(self):
"""Test the linear interpolations using a linear model."""
bs = self.batch_size
num_batches = CONFIG['num_batches']
num_draws = CONFIG['num_eval_draws']
num_obs = num_batches * bs
step = 0
num_points = CONFIG['num_points']
grads, _ = self.evaluator.compute_dirs(self.optimizer)
_, q = np.linalg.eigh(self.hessian)
evecs = [q[:, -k] for k in range(CONFIG['num_eigens'], 0, -1)]
q = q[:, -CONFIG['num_eigens']:]
interps_row = self.evaluator.compute_interpolations(self.optimizer.target,
grads, [], evecs,
[], step)
# Computing the exact full-batch quantities from the linear model
etas = interps_row['step_size']
xb = self.feature[:num_obs, :]
yb = self.y[:num_obs, :]
for i in range(num_draws + 1):
exact_values = np.zeros((num_points,))
dir_vec = _to_vec(grads[i])
# Normalize:
dir_vec = dir_vec / np.linalg.norm(dir_vec)
for j in range(num_points):
new_param = self.beta + etas[j] * dir_vec
errs = yb - np.dot(xb, new_param)
exact_values[j] = np.dot(errs.T, errs)[0, 0] / num_obs
values = interps_row['loss%d'%(i,)]
self.assertTrue(np.allclose(exact_values, values, atol=1e-6, rtol=1e-5))
# Checking interpolations for the eigenvectors
for i in range(CONFIG['num_eigens']):
exact_values = np.zeros((num_points,))
dir_vec = evecs[i].reshape(len(self.beta), 1)
for j in range(num_points):
new_param = self.beta + etas[j] * dir_vec
errs = yb - np.dot(xb, new_param)
exact_values[j] = | np.dot(errs.T, errs) | numpy.dot |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
    """A crystallographic space group.

    All possible space group objects are created in this module. Other
    modules should look these objects up through the ``space_groups``
    dictionary rather than construct their own.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each a tuple (rot, tn, td) of integer
                                arrays: the rotation matrix and the
                                numerator/denominator of the translation
                                vector, in fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        # Precompute the transposed rotations and the per-transformation
        # phase factors used by symmetryEquivalentMillerIndices.
        rotations = []
        exponents = []
        for rot, trans_num, trans_den in transformations:
            rotations.append(N.transpose(rot))
            exponents.append((-2j * N.pi * trans_num) / trans_den)
        self.transposed_rotations = N.array(rotations)
        self.phase_factors = N.exp(N.array(exponents))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """Return symmetry-equivalent reflections with their phase factors.

        :param hkl: a set of Miller indices
        :return: a tuple (miller_indices, phase_factor) of arrays with one
                 entry per space group transformation: the Miller indices of
                 each reflection equivalent by symmetry to ``hkl`` (``hkl``
                 itself first), and the phase factors to apply to the
                 structure factor of ``hkl`` to obtain those of the
                 equivalent reflections.
        :rtype: tuple
        """
        equivalent = N.dot(self.transposed_rotations, hkl)
        phases = N.multiply.reduce(self.phase_factors**hkl, -1)
        return equivalent, phases
# Registry mapping both space group numbers and symbols to SpaceGroup objects.
space_groups = {}

# Space group 1 ('P 1'): identity operation only.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
]
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
# Space group 2 ('P -1'): identity and inversion.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
]
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
# Space group 3 ('P 1 2 1'): 2 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
]
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
# Space group 4 ('P 1 21 1'): 2 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 1, 0]), N.array([1, 2, 1])),
]
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
# Space group 5 ('C 1 2 1'): 2 operations plus their (1/2, 1/2, 0) copies.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
]
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
# Space group 6 ('P 1 m 1'): 2 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
]
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
# Space group 7 ('P 1 c 1'): 2 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
]
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
# Space group 8 ('C 1 m 1'): 2 operations plus their (1/2, 1/2, 0) copies.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
]
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
# Space group 9 ('C 1 c 1'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
]
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
# Space group 10 ('P 1 2/m 1'): 4 operations, all with zero translation.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
]
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
# Space group 11 ('P 1 21/m 1'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 1, 0]), N.array([1, 2, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, -1, 0]), N.array([1, 2, 1])),
]
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
# Space group 12 ('C 1 2/m 1'): 4 operations plus their (1/2, 1/2, 0) copies.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
]
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
# Space group 13 ('P 1 2/c 1'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, -1]), N.array([1, 1, 2])),
]
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
# Space group 14 ('P 1 21/c 1'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 1, 1]), N.array([1, 2, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, -1, -1]), N.array([1, 2, 2])),
]
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
# Space group 15 ('C 1 2/c 1'): 8 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, -1]), N.array([1, 1, 2])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, -1]), N.array([2, 2, 2])),
]
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
# Space group 16 ('P 2 2 2'): 4 operations, all with zero translation.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
]
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
# Space group 17 ('P 2 2 21'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
]
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
# Space group 18 ('P 21 21 2'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
]
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
# Space group 19 ('P 21 21 21'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 1, 1]), N.array([1, 2, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 0, 1]), N.array([2, 1, 2])),
]
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
# Space group 20 ('C 2 2 21'): 4 operations plus their (1/2, 1/2, 0) copies.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
]
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
# Space group 21 ('C 2 2 2'): 4 operations plus their (1/2, 1/2, 0) copies.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
]
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
# Space group 22 ('F 2 2 2'): 4 operations, each repeated with the
# translations (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 1, 1]), N.array([1, 2, 2])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 1, 1]), N.array([1, 2, 2])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 1, 1]), N.array([1, 2, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 1, 1]), N.array([1, 2, 2])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 0, 1]), N.array([2, 1, 2])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([1, 0, 1]), N.array([2, 1, 2])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 0, 1]), N.array([2, 1, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 0, 1]), N.array([2, 1, 2])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
]
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
# Space group 23 ('I 2 2 2'): 4 operations plus their (1/2, 1/2, 1/2) copies.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
]
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
# Space group 24 ('I 21 21 21'): 8 operations.  The mixed denominators in
# the centred half are reproduced exactly as generated.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 0, 0]), N.array([2, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 1, 0]), N.array([1, 2, 1])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]), N.array([1, 1, 1]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]]), N.array([1, 1, 1]), N.array([1, 2, 2])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 1]), N.array([2, 1, 2])),
]
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
# Space group 25 ('P m m 2'): 4 operations, all with zero translation.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
]
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
# Space group 26 ('P m c 21'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
]
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
# Space group 27 ('P c c 2'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
]
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
# Space group 28 ('P m a 2'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 0, 0]), N.array([2, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 0, 0]), N.array([2, 1, 1])),
]
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
# Space group 29 ('P c a 21'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 0, 1]), N.array([2, 1, 2])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 0, 0]), N.array([2, 1, 1])),
]
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
# Space group 30 ('P n c 2'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 1, 1]), N.array([1, 2, 2])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 1, 1]), N.array([1, 2, 2])),
]
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
# Space group 31 ('P m n 21'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 0, 1]), N.array([2, 1, 2])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 0, 1]), N.array([2, 1, 2])),
]
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
# Space group 32 ('P b a 2'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
]
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
# Space group 33 ('P n a 21'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 1]), N.array([1, 1, 2])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
]
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
# Space group 34 ('P n n 2'): 4 operations.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 1]), N.array([2, 2, 2])),
]
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
# Space group 35 ('C m m 2'): 4 operations plus their (1/2, 1/2, 0) copies.
transformations = [
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([0, 0, 0]), N.array([1, 1, 1])),
    (N.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
    (N.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]), N.array([1, 1, 0]), N.array([2, 2, 1])),
]
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
# Space group 36 (C m c 21): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
# Space group 37 (C c c 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
# Space group 38 (A m m 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
# Space group 39 (A b m 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 1, 0), (1, 2, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 1, 0), (1, 2, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 1, 1), (1, 1, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 1, 1), (1, 1, 2)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
# Space group 40 (A m a 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 0, 0), (2, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 0, 0), (2, 1, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
# Space group 41 (A b a 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 1), (2, 1, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 1, 2)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
# Space group 42 (F m m 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 0, 1), (2, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 0, 1), (2, 1, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 0, 1), (2, 1, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 0, 1), (2, 1, 2)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
# Space group 43 (F d d 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 1), (4, 4, 4)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (4, 4, 4)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 1, 1), (1, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 3, 3), (4, 4, 4)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 3, 3), (4, 4, 4)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 0, 1), (2, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 0, 1), (2, 1, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (3, 1, 3), (4, 4, 4)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (3, 1, 3), (4, 4, 4)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (3, 3, 1), (4, 4, 4)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (3, 3, 1), (4, 4, 4)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
# Space group 44 (I m m 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
# Space group 45 (I b a 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 1)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
# Space group 46 (I m a 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 0, 0), (2, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 0, 0), (2, 1, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, 1), (1, 1, 1), (1, 2, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (1, 2, 2)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
# Space group 47 (P m m m): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0,  1), (0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0,  1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0,  1), (0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0,  1), (0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
# Space group 48 (P n n n, origin choice 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0,  0,  0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0,  1,  1), (1, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, -1), ( 1,  0,  1), (2, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0,  1), ( 1,  1,  0), (2, 2, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0,  0,  0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0, -1, -1), (1, 2, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0,  1), (-1,  0, -1), (2, 1, 2)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, -1), (-1, -1,  0), (2, 2, 1)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
# Space group 49 (P c c m): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0,  1), (0, 0,  0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0,  1), (1, 1, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, -1), (0, 0,  1), (1, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0,  1), (0, 0,  0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0,  0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0,  1), (0, 0, -1), (1, 1, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0,  1), (0, 0, -1), (1, 1, 2)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, -1), (0, 0,  0), (1, 1, 1)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
# Space group 50 (P b a n, origin choice 2): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0,  0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0,  1, 0), (1, 2, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, -1), ( 1,  0, 0), (2, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0,  1), ( 1,  1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0,  0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0, -1, 0), (1, 2, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0,  1), (-1,  0, 0), (2, 1, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, -1), (-1, -1, 0), (2, 2, 1)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
# Space group 51 (P m m a): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, -1), ( 1, 0, 0), (2, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, -1), ( 0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0,  1), ( 1, 0, 0), (2, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0,  1), (-1, 0, 0), (2, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0,  1), ( 0, 0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, -1), (-1, 0, 0), (2, 1, 1)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
# Space group 52 (P n n a): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0,  0,  0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0,  1,  1), (1, 2, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, -1), ( 1,  1,  1), (2, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0,  1), ( 1,  0,  0), (2, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0,  0,  0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0, -1, -1), (1, 2, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0,  1), (-1, -1, -1), (2, 2, 2)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, -1), (-1,  0,  0), (2, 1, 1)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
# Space group 53 (P m n a): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0, 0,  0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0, 0,  0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, -1), ( 1, 0,  1), (2, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0,  1), ( 1, 0,  1), (2, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0, 0,  0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0, 0,  0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0,  1), (-1, 0, -1), (2, 1, 2)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, -1), (-1, 0, -1), (2, 1, 2)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
# Space group 54 (P c c a): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0, 0,  0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, -1), ( 1, 0,  1), (2, 1, 2)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, -1), ( 0, 0,  1), (1, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0,  1), ( 1, 0,  0), (2, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0, 0,  0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0,  1), (-1, 0, -1), (2, 1, 2)),
    (( 1, 0, 0, 0, -1, 0, 0, 0,  1), ( 0, 0, -1), (1, 1, 2)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, -1), (-1, 0,  0), (2, 1, 1)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
# Space group 55 (P b a m): symmetry operations as
# (flattened rotation, translation numerator, translation denominator) rows.
transformations = []
_ops = (
    (( 1, 0, 0, 0,  1, 0, 0, 0,  1), ( 0,  0, 0), (1, 1, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0, -1), ( 1,  1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0, -1), ( 1,  1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0,  1), ( 0,  0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, -1), ( 0,  0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0,  1, 0, 0, 0,  1), (-1, -1, 0), (2, 2, 1)),
    (( 1, 0, 0, 0, -1, 0, 0, 0,  1), (-1, -1, 0), (2, 2, 1)),
    (( 1, 0, 0, 0,  1, 0, 0, 0, -1), ( 0,  0, 0), (1, 1, 1)),
)
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
# Space group 56 (P c c n).
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0],  [2,2,1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
# Space group 57 (P b c m).
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,1,0],    [1,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,1],    [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,-1,0],   [1,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,-1],   [1,1,2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
# Space group 58 (P n n m).
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],     [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,1],     [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],     [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0],     [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],     [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,-1,-1],  [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,-1,-1],  [2,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0],     [1,1,1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
# Space group 59 (P m m n, origin choice 2).
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,0,0],    [2,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,1,0],    [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,0,0],   [2,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,-1,0],   [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0],  [2,2,1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
# Space group 60 (P b c n).
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],     [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,0],     [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,1],     [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,1],     [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],     [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,-1,0],   [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,-1],    [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,-1],  [2,2,2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
# Space group 61 (P b c a).
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,-1,0],  [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,0,-1],  [2,1,2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
# Space group 62 (P n m a).
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],     [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,1],     [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,1,0],     [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,1],     [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],     [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,-1,-1],  [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,-1,0],    [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,0,-1],   [2,1,2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
# Space group 63 (C m c m), 16 operations.
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.  The second half repeats the first eight
# rotations shifted by the C-centring translation (1/2, 1/2, 0).
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,1],   [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,1],   [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,0],   [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,-1],  [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,-1],  [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,0],   [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],   [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,1],   [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,0],   [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [1,1,0],   [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [1,1,-1],  [2,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [1,1,-1],  [2,2,2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
# Space group 64 (C m c a), 16 operations.
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.  The second half repeats the first eight
# rotations shifted by the C-centring translation (1/2, 1/2, 0).
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,0],    [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],    [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,1],    [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [1,1,0],    [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,1,-1],   [1,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,1,-1],   [1,2,2]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
# Space group 65 (C m m m), 16 operations.
# All eight mmm point-group rotations appear twice: once with zero
# translation and once with the C-centring translation (1/2, 1/2, 0).
transformations = []
for num, den in [
    ([0,0,0], [1,1,1]),
    ([1,1,0], [2,2,1]),
]:
    for rot_vals in [
        [1,0,0, 0,1,0, 0,0,1],
        [1,0,0, 0,-1,0, 0,0,-1],
        [-1,0,0, 0,1,0, 0,0,-1],
        [-1,0,0, 0,-1,0, 0,0,1],
        [-1,0,0, 0,-1,0, 0,0,-1],
        [-1,0,0, 0,1,0, 0,0,1],
        [1,0,0, 0,-1,0, 0,0,1],
        [1,0,0, 0,1,0, 0,0,-1],
    ]:
        rot = N.array(rot_vals)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
# Space group 66 (C c c m), 16 operations.
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.  The second half repeats the first eight
# rotations shifted by the C-centring translation (1/2, 1/2, 0).
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,1],   [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,1],   [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,-1],  [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,-1],  [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0],   [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,0],   [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,1],   [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],   [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],   [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,0],   [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [1,1,-1],  [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [1,1,-1],  [2,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [1,1,0],   [2,2,1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
# Space group 67 (C m m a), 16 operations.
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.  The second half repeats the first eight
# rotations shifted by the C-centring translation (1/2, 1/2, 0).
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,0],   [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,0],   [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,0],   [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,0],  [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,0,0],  [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,0],   [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,0],   [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],   [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,0],   [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [1,1,0],   [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,1,0],   [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,1,0],   [1,2,1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
# Space group 68 (C c c a, origin choice 2), 16 operations.
# Each row: 3x3 rotation matrix elements, translation numerators,
# translation denominators.  The second half repeats the first eight
# rotations shifted by the C-centring translation (1/2, 1/2, 0).
transformations = []
for rot_vals, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,1],    [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,0],    [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,-1],   [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,0,0],   [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],    [1,1,0],    [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,1,1],    [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [1,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,1,-1],   [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [1,1,-1],   [2,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,1,0],    [1,2,1]),
]:
    rot = N.array(rot_vals)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
# Space group 69 (F m m m), 32 operations.
# All eight mmm point-group rotations appear once per F-centring
# translation: (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0),
# in that order, matching the original flat listing.
transformations = []
for num, den in [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]:
    for rot_vals in [
        [1,0,0, 0,1,0, 0,0,1],
        [1,0,0, 0,-1,0, 0,0,-1],
        [-1,0,0, 0,1,0, 0,0,-1],
        [-1,0,0, 0,-1,0, 0,0,1],
        [-1,0,0, 0,-1,0, 0,0,-1],
        [-1,0,0, 0,1,0, 0,0,1],
        [1,0,0, 0,-1,0, 0,0,1],
        [1,0,0, 0,1,0, 0,0,-1],
    ]:
        rot = N.array(rot_vals)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
# Space group 70 (F d d d, origin choice 2).  Each entry is one symmetry
# operation given as (rotation matrix elements, translation numerators,
# translation denominators) in fractional coordinates.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [4,4,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,3,3], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,3], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,3,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,1,1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,0,3], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,1,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,-1,1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,-1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,3,1], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,1,1], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,3,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,-1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [4,4,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
# Space group 71 (I m m m): the eight mmm point-group operations, each
# repeated with the body-centering translation (1/2, 1/2, 1/2).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
# Space group 72 (I b a m).  Each entry is (rotation matrix elements,
# translation numerators, translation denominators); translations are
# reproduced exactly as generated, including unreduced fractions.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
# Space group 73 (I b c a).  Each entry is (rotation matrix elements,
# translation numerators, translation denominators) in fractional
# coordinates.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
# Space group 74 (I m m a).  Each entry is (rotation matrix elements,
# translation numerators, translation denominators) in fractional
# coordinates.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
# Space group 75 (P 4): the four rotations of a fourfold axis along z,
# all with zero translation.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
# Space group 76 (P 41): fourfold screw axis along z with translations
# 0, 1/4, 3/4, 1/2 of the c axis.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
# Space group 77 (P 42): fourfold screw axis along z with half-cell
# translations on the fourfold rotations.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
# Space group 78 (P 43): fourfold screw axis along z with translations
# 0, 3/4, 1/4, 1/2 of the c axis (enantiomorph of P 41).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
# Space group 79 (I 4): the four tetragonal rotations, each repeated
# with the body-centering translation (1/2, 1/2, 1/2).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
# Space group 80 (I 41).  Translations are reproduced exactly as
# generated (including numerators larger than their denominators, which
# the consuming code reduces modulo the lattice).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
# Space group 81 (P -4): the fourfold rotoinversion operations, all with
# zero translation.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
# Space group 82 (I -4): the P -4 operations, each repeated with the
# body-centering translation (1/2, 1/2, 1/2).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
# Space group 83 (P 4/m): the eight 4/m point-group operations, all with
# zero translation.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
# Space group 84 (P 42/m): like P 4/m, but the fourfold rotations carry
# a half-cell screw translation along z.
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
# Space group 85 (P 4/n, origin choice 2).  Each entry is (rotation
# matrix elements, translation numerators, translation denominators).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
# Space group 86 (P 42/n, origin choice 2).  Each entry is (rotation
# matrix elements, translation numerators, translation denominators).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
# Space group 87 (I 4/m): the eight 4/m operations, each repeated with
# the body-centering translation (1/2, 1/2, 1/2).
transformations = []
for rot, trans_num, trans_den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
# Space group 88 (I 41/a :2).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator); the
# translation is num/den in fractional coordinates.
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),   (0,0,0),    (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),  (1,3,3),    (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1),  (1,1,1),    (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,1), (0,1,0),    (1,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1),(0,0,0),    (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,-3,-3), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,-1,-1), (4,4,4)),
    ((1,0,0,0,1,0,0,0,-1),  (0,-1,0),   (1,2,1)),
    ((1,0,0,0,1,0,0,0,1),   (1,1,1),    (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1),  (3,5,5),    (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1),  (3,3,3),    (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1),    (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1),(1,1,1),    (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1), (1,-1,-1),  (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (1,1,1),    (4,4,4)),
    ((1,0,0,0,1,0,0,0,-1),  (1,0,1),    (2,1,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
# Space group 89 (P 4 2 2): every operation is a pure rotation, so all
# translations are 0/1 in fractional coordinates.
transformations = []
for elems in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
# Space group 90 (P 4 21 2).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (1,1,0), (2,2,1)),
    ((0,1,0,-1,0,0,0,0,1),   (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,-1),  (1,1,0), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,1,0), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
# Space group 91 (P 41 2 2).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (0,0,1), (1,1,4)),
    ((0,1,0,-1,0,0,0,0,1),   (0,0,3), (1,1,4)),
    ((1,0,0,0,-1,0,0,0,-1),  (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,3), (1,1,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,4)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
# Space group 92 (P 41 21 2).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (1,1,1), (2,2,4)),
    ((0,1,0,-1,0,0,0,0,1),   (1,1,3), (2,2,4)),
    ((1,0,0,0,-1,0,0,0,-1),  (1,1,3), (2,2,4)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,1,1), (2,2,4)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
# Space group 93 (P 42 2 2).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1),   (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
# Space group 94 (P 42 21 2).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1),   (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,-1),  (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
# Space group 95 (P 43 2 2).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (0,0,3), (1,1,4)),
    ((0,1,0,-1,0,0,0,0,1),   (0,0,1), (1,1,4)),
    ((1,0,0,0,-1,0,0,0,-1),  (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,1), (1,1,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,3), (1,1,4)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
# Space group 96 (P 43 21 2).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (1,1,3), (2,2,4)),
    ((0,1,0,-1,0,0,0,0,1),   (1,1,1), (2,2,4)),
    ((1,0,0,0,-1,0,0,0,-1),  (1,1,1), (2,2,4)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,1,3), (2,2,4)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
# Space group 97 (I 4 2 2): body-centred, so each of the eight point-group
# rotations occurs twice — once with zero translation and once shifted by
# (1/2, 1/2, 1/2).  Order matches the original flat listing: all zero-shift
# operations first, then all centred ones.
transformations = []
for num, den in [((0,0,0), (1,1,1)), ((1,1,1), (2,2,2))]:
    for elems in [
        (1,0,0,0,1,0,0,0,1),
        (0,-1,0,1,0,0,0,0,1),
        (0,1,0,-1,0,0,0,0,1),
        (1,0,0,0,-1,0,0,0,-1),
        (-1,0,0,0,1,0,0,0,-1),
        (-1,0,0,0,-1,0,0,0,1),
        (0,1,0,1,0,0,0,0,-1),
        (0,-1,0,-1,0,0,0,0,-1),
    ]:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
# Space group 98 (I 41 2 2).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator); the
# translation is num/den in fractional coordinates.
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (1,0,3), (2,1,4)),
    ((0,1,0,-1,0,0,0,0,1),   (1,0,3), (2,1,4)),
    ((1,0,0,0,-1,0,0,0,-1),  (1,0,3), (2,1,4)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,0,3), (2,1,4)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((1,0,0,0,1,0,0,0,1),    (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1),   (1,1,5), (1,2,4)),
    ((0,1,0,-1,0,0,0,0,1),   (1,1,5), (1,2,4)),
    ((1,0,0,0,-1,0,0,0,-1),  (1,1,5), (1,2,4)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,1,5), (1,2,4)),
    ((-1,0,0,0,-1,0,0,0,1),  (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,-1),   (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
# Space group 99 (P 4 m m): every operation is a pure rotation/reflection,
# so all translations are 0/1 in fractional coordinates.
transformations = []
for elems in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (-1,0,0,0,-1,0,0,0,1),
    (-1,0,0,0,1,0,0,0,1),
    (1,0,0,0,-1,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,1),
    (0,1,0,1,0,0,0,0,1),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
# Space group 100 (P 4 b m).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),  (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1),  (1,1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,1),   (1,1,0), (2,2,1)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
# Space group 101 (P 42 c m).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),  (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1),  (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),  (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1),  (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1),   (0,0,0), (1,1,1)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
# Space group 102 (P 42 n m).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),  (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1),  (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),  (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1),  (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1),   (0,0,0), (1,1,1)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
# Space group 103 (P 4 c c).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),  (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1),  (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1),   (0,0,1), (1,1,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
# Space group 104 (P 4 n c).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),  (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1),  (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1),   (1,1,1), (2,2,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
# Space group 105 (P 42 m c).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),  (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1),  (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1),   (0,0,1), (1,1,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
# Space group 106 (P 42 b c).  Each entry is (rotation-matrix elements
# row-major, translation numerator, translation denominator).
transformations = []
for elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),  (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1),  (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),  (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1),  (1,1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1),   (1,1,1), (2,2,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
# Space group 107 (I 4 m m).
# Body-centred: the second half repeats the first 8 rotations with the
# (1/2, 1/2, 1/2) centring translation (trans_num / trans_den).
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,1,0,  0,0,1),  (0,0,0), (1,1,1)),
    ((1,0,0,  0,-1,0, 0,0,1),  (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,  1,0,0,  0,0,1),  (0,0,0), (1,1,1)),
    ((1,0,0,  0,1,0,  0,0,1),  (1,1,1), (2,2,2)),
    ((0,-1,0, 1,0,0,  0,0,1),  (1,1,1), (2,2,2)),
    ((0,1,0,  -1,0,0, 0,0,1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,1,0,  0,0,1),  (1,1,1), (2,2,2)),
    ((1,0,0,  0,-1,0, 0,0,1),  (1,1,1), (2,2,2)),
    ((0,-1,0, -1,0,0, 0,0,1),  (1,1,1), (2,2,2)),
    ((0,1,0,  1,0,0,  0,0,1),  (1,1,1), (2,2,2)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
# Space group 108 (I 4 c m).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation is trans_num / trans_den.
# NOTE(review): translations in the centred half are left unreduced
# (e.g. (1/2, 1/2, 1)), matching the generated-table convention.
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,1,0,  0,0,1),  (0,0,1), (1,1,2)),
    ((1,0,0,  0,-1,0, 0,0,1),  (0,0,1), (1,1,2)),
    ((0,-1,0, -1,0,0, 0,0,1),  (0,0,1), (1,1,2)),
    ((0,1,0,  1,0,0,  0,0,1),  (0,0,1), (1,1,2)),
    ((1,0,0,  0,1,0,  0,0,1),  (1,1,1), (2,2,2)),
    ((0,-1,0, 1,0,0,  0,0,1),  (1,1,1), (2,2,2)),
    ((0,1,0,  -1,0,0, 0,0,1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,1,0,  0,0,1),  (1,1,1), (2,2,1)),
    ((1,0,0,  0,-1,0, 0,0,1),  (1,1,1), (2,2,1)),
    ((0,-1,0, -1,0,0, 0,0,1),  (1,1,1), (2,2,1)),
    ((0,1,0,  1,0,0,  0,0,1),  (1,1,1), (2,2,1)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
# Space group 109 (I 41 m d).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation is trans_num / trans_den.
# NOTE(review): centred-half translations are left unreduced
# (e.g. (1, 1/2, 5/4)), matching the generated-table convention.
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,1),  (1,0,3), (2,1,4)),
    ((0,1,0,  -1,0,0, 0,0,1),  (1,0,3), (2,1,4)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,1,0,  0,0,1),  (0,0,0), (1,1,1)),
    ((1,0,0,  0,-1,0, 0,0,1),  (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,1),  (1,0,3), (2,1,4)),
    ((0,1,0,  1,0,0,  0,0,1),  (1,0,3), (2,1,4)),
    ((1,0,0,  0,1,0,  0,0,1),  (1,1,1), (2,2,2)),
    ((0,-1,0, 1,0,0,  0,0,1),  (1,1,5), (1,2,4)),
    ((0,1,0,  -1,0,0, 0,0,1),  (1,1,5), (1,2,4)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,1,0,  0,0,1),  (1,1,1), (2,2,2)),
    ((1,0,0,  0,-1,0, 0,0,1),  (1,1,1), (2,2,2)),
    ((0,-1,0, -1,0,0, 0,0,1),  (1,1,5), (1,2,4)),
    ((0,1,0,  1,0,0,  0,0,1),  (1,1,5), (1,2,4)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
# Space group 110 (I 41 c d).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation is trans_num / trans_den.
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,1),  (1,0,3), (2,1,4)),
    ((0,1,0,  -1,0,0, 0,0,1),  (1,0,3), (2,1,4)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,1,0,  0,0,1),  (0,0,1), (1,1,2)),
    ((1,0,0,  0,-1,0, 0,0,1),  (0,0,1), (1,1,2)),
    ((0,-1,0, -1,0,0, 0,0,1),  (1,0,1), (2,1,4)),
    ((0,1,0,  1,0,0,  0,0,1),  (1,0,1), (2,1,4)),
    ((1,0,0,  0,1,0,  0,0,1),  (1,1,1), (2,2,2)),
    ((0,-1,0, 1,0,0,  0,0,1),  (1,1,5), (1,2,4)),
    ((0,1,0,  -1,0,0, 0,0,1),  (1,1,5), (1,2,4)),
    ((-1,0,0, 0,-1,0, 0,0,1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,1,0,  0,0,1),  (1,1,1), (2,2,1)),
    ((1,0,0,  0,-1,0, 0,0,1),  (1,1,1), (2,2,1)),
    ((0,-1,0, -1,0,0, 0,0,1),  (1,1,3), (1,2,4)),
    ((0,1,0,  1,0,0,  0,0,1),  (1,1,3), (1,2,4)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
# Space group 111 (P -4 2 m): 8 symmetrically pure rotations (all
# translations zero). Each entry is (rotation, numerator, denominator).
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((1,0,0,  0,-1,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,1,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  1,0,0,  0,0,1),   (0,0,0), (1,1,1)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
# Space group 112 (P -4 2 c).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation is trans_num / trans_den.
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((1,0,0,  0,-1,0, 0,0,-1),  (0,0,1), (1,1,2)),
    ((-1,0,0, 0,1,0,  0,0,-1),  (0,0,1), (1,1,2)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,1),   (0,0,1), (1,1,2)),
    ((0,1,0,  1,0,0,  0,0,1),   (0,0,1), (1,1,2)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
# Space group 113 (P -4 21 m).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation is trans_num / trans_den.
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((1,0,0,  0,-1,0, 0,0,-1),  (1,1,0), (2,2,1)),
    ((-1,0,0, 0,1,0,  0,0,-1),  (1,1,0), (2,2,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,1),   (1,1,0), (2,2,1)),
    ((0,1,0,  1,0,0,  0,0,1),   (1,1,0), (2,2,1)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
# Space group 114 (P -4 21 c).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation is trans_num / trans_den.
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((1,0,0,  0,-1,0, 0,0,-1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,1,0,  0,0,-1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,1),   (1,1,1), (2,2,2)),
    ((0,1,0,  1,0,0,  0,0,1),   (1,1,1), (2,2,2)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
# Space group 115 (P -4 m 2): 8 operations, all with zero translation.
# Each entry is (rotation matrix, translation numerator, denominator).
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((1,0,0,  0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
# Space group 116 (P -4 c 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation is trans_num / trans_den.
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  1,0,0,  0,0,-1),  (0,0,1), (1,1,2)),
    ((0,-1,0, -1,0,0, 0,0,-1),  (0,0,1), (1,1,2)),
    ((-1,0,0, 0,1,0,  0,0,1),   (0,0,1), (1,1,2)),
    ((1,0,0,  0,-1,0, 0,0,1),   (0,0,1), (1,1,2)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
# Space group 117 (P -4 b 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation is trans_num / trans_den.
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  1,0,0,  0,0,-1),  (1,1,0), (2,2,1)),
    ((0,-1,0, -1,0,0, 0,0,-1),  (1,1,0), (2,2,1)),
    ((-1,0,0, 0,1,0,  0,0,1),   (1,1,0), (2,2,1)),
    ((1,0,0,  0,-1,0, 0,0,1),   (1,1,0), (2,2,1)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
# Space group 118 (P -4 n 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation is trans_num / trans_den.
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  1,0,0,  0,0,-1),  (1,1,1), (2,2,2)),
    ((0,-1,0, -1,0,0, 0,0,-1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,1,0,  0,0,1),   (1,1,1), (2,2,2)),
    ((1,0,0,  0,-1,0, 0,0,1),   (1,1,1), (2,2,2)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
# Space group 119 (I -4 m 2).
# Body-centred: the second half repeats the first 8 rotations with the
# (1/2, 1/2, 1/2) centring translation (trans_num / trans_den).
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((1,0,0,  0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((1,0,0,  0,1,0,  0,0,1),   (1,1,1), (2,2,2)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (1,1,1), (2,2,2)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (1,1,1), (2,2,2)),
    ((0,1,0,  1,0,0,  0,0,-1),  (1,1,1), (2,2,2)),
    ((0,-1,0, -1,0,0, 0,0,-1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,1,0,  0,0,1),   (1,1,1), (2,2,2)),
    ((1,0,0,  0,-1,0, 0,0,1),   (1,1,1), (2,2,2)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
# Space group 120 (I -4 c 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the translation is trans_num / trans_den.
# NOTE(review): centred-half translations are left unreduced
# (e.g. (1/2, 1/2, 1)), matching the generated-table convention.
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  1,0,0,  0,0,-1),  (0,0,1), (1,1,2)),
    ((0,-1,0, -1,0,0, 0,0,-1),  (0,0,1), (1,1,2)),
    ((-1,0,0, 0,1,0,  0,0,1),   (0,0,1), (1,1,2)),
    ((1,0,0,  0,-1,0, 0,0,1),   (0,0,1), (1,1,2)),
    ((1,0,0,  0,1,0,  0,0,1),   (1,1,1), (2,2,2)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (1,1,1), (2,2,2)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (1,1,1), (2,2,2)),
    ((0,1,0,  1,0,0,  0,0,-1),  (1,1,1), (2,2,1)),
    ((0,-1,0, -1,0,0, 0,0,-1),  (1,1,1), (2,2,1)),
    ((-1,0,0, 0,1,0,  0,0,1),   (1,1,1), (2,2,1)),
    ((1,0,0,  0,-1,0, 0,0,1),   (1,1,1), (2,2,1)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
# Space group 121 (I -4 2 m).
# Body-centred: the second half repeats the first 8 rotations with the
# (1/2, 1/2, 1/2) centring translation (trans_num / trans_den).
transformations = []
for _rot_elems, _num, _den in [
    ((1,0,0,  0,1,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((1,0,0,  0,-1,0, 0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,1,0,  0,0,-1),  (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,  1,0,0,  0,0,1),   (0,0,0), (1,1,1)),
    ((1,0,0,  0,1,0,  0,0,1),   (1,1,1), (2,2,2)),
    ((0,1,0,  -1,0,0, 0,0,-1),  (1,1,1), (2,2,2)),
    ((0,-1,0, 1,0,0,  0,0,-1),  (1,1,1), (2,2,2)),
    ((1,0,0,  0,-1,0, 0,0,-1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,1,0,  0,0,-1),  (1,1,1), (2,2,2)),
    ((-1,0,0, 0,-1,0, 0,0,1),   (1,1,1), (2,2,2)),
    ((0,-1,0, -1,0,0, 0,0,1),   (1,1,1), (2,2,2)),
    ((0,1,0,  1,0,0,  0,0,1),   (1,1,1), (2,2,2)),
]:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
# Space group 122 (I -4 2 d).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,3], [2,1,4]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,0,3], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1],    [1,0,3], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],  [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,5], [1,2,4]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,1,5], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,5], [1,2,4]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
# Space group 123 (P 4/m m m).
# All sixteen operations of this symmorphic group carry a zero translation;
# each entry is (rotation, translation numerator, translation denominator).
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
# Space group 124 (P 4/m c c).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
# Space group 125 (P 4/n b m :2, origin choice 2).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,0],   [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,0],   [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,0],   [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,0],   [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0],   [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,0],  [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,0],  [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,0],  [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,0],  [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0],   [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
# Space group 126 (P 4/n n c :2, origin choice 2).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,0],    [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,0],    [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],    [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],    [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,0],   [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,0],   [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0],  [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1],   [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
# Space group 127 (P 4/m b m).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0],   [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],   [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,0],   [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,0],   [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,-1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],   [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],    [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
# Space group 128 (P 4/m n c).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0],    [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],    [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,1],    [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,0],    [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],    [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
# Space group 129 (P 4/n m m :2, origin choice 2).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,0],   [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,0],   [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,0],   [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,0],   [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,0],   [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,0],  [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,0],  [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,0],  [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,0],  [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0],   [1,1,1]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
# Space group 130 (P 4/n c c :2, origin choice 2).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,0],    [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,0],    [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],    [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,1],    [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],    [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,0],   [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,0],   [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0],  [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1],   [1,1,2]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
# Space group 131 (P 42/m m c).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
# Space group 132 (P 42/m c m).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0],  [1,1,1]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
# Space group 133 (P 42/n b c :2, origin choice 2).
# Each symmetry operation is a (rotation, translation numerator,
# translation denominator) triple; the fractional translation is num/den.
transformations = []
for rot_flat, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,1],    [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,1],    [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,0],    [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,0],    [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],    [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],    [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,-1],  [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,-1],  [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,0],   [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,0],   [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0],  [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1],   [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
# Symmetry operations for space group 134, 'P 42/n n m' (origin choice 2).
# Each operation is stored as (rotation, trans_num, trans_den): a 3x3
# integer rotation matrix acting on fractional coordinates plus a
# translation given component-wise as trans_num/trans_den.
transformations = []
for rot_elements, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,1), (2,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,-1), (2,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,-1,-1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (0,-1,-1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (-1,0,-1), (2,1,2)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,0), (2,2,1)),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
# Symmetry operations for space group 135, 'P 42/m b c'.
# Each operation is stored as (rotation, trans_num, trans_den): a 3x3
# integer rotation matrix acting on fractional coordinates plus a
# translation given component-wise as trans_num/trans_den.
transformations = []
for rot_elements, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,-1), (1,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,-1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,1), (-1,-1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (-1,-1,0), (2,2,1)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-1,-1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,-1), (2,2,2)),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
# Symmetry operations for space group 136, 'P 42/m n m'.
# Each operation is stored as (rotation, trans_num, trans_den): a 3x3
# integer rotation matrix acting on fractional coordinates plus a
# translation given component-wise as trans_num/trans_den.
transformations = []
for rot_elements, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,-1,-1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,-1,-1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (-1,-1,-1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (-1,-1,-1), (2,2,2)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
# Symmetry operations for space group 137, 'P 42/n m c' (origin choice 2).
# Each operation is stored as (rotation, trans_num, trans_den): a 3x3
# integer rotation matrix acting on fractional coordinates plus a
# translation given component-wise as trans_num/trans_den.
transformations = []
for rot_elements, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,1), (2,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,0,0), (2,1,1)),
    ((-1,0,0,0,1,0,0,0,-1), (0,1,0), (1,2,1)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,-1), (2,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,-1,-1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (-1,0,0), (2,1,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,-1,0), (1,2,1)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-1,-1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,-1), (1,1,2)),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
# Symmetry operations for space group 138, 'P 42/n c m' (origin choice 2).
# Each operation is stored as (rotation, trans_num, trans_den): a 3x3
# integer rotation matrix acting on fractional coordinates plus a
# translation given component-wise as trans_num/trans_den.
transformations = []
for rot_elements, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,1), (2,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,-1), (2,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,-1,-1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (-1,0,-1), (2,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,-1,-1), (1,2,2)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
# Symmetry operations for space group 139, 'I 4/m m m'.
# The 16 point-group rotations appear twice: first with zero translation,
# then shifted by the body-centering vector (1/2, 1/2, 1/2).  Each
# operation is stored as (rotation, trans_num, trans_den).
_i4mmm_rotations = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,0,0,0,0,-1),
    (0,-1,0,1,0,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,1),
    (1,0,0,0,-1,0,0,0,1),
    (1,0,0,0,1,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,1),
    (0,1,0,1,0,0,0,0,1),
]
transformations = []
for num, den in [((0,0,0), (1,1,1)), ((1,1,1), (2,2,2))]:
    for rot_elements in _i4mmm_rotations:
        rot = N.array(rot_elements)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
# Symmetry operations for space group 140, 'I 4/m c m'.
# 32 operations: the second block of 16 repeats the first with the
# body-centering shift (1/2, 1/2, 1/2) folded into the translation
# (numerators/denominators are kept unreduced, as generated).  Each
# operation is stored as (rotation, trans_num, trans_den).
transformations = []
for rot_elements, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,-1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((1,0,0,0,1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,1), (1,1,0), (2,2,1)),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
# Symmetry operations for space group 141, 'I 41/a m d' (origin choice 2).
# 32 operations: the second block of 16 repeats the first with the
# body-centering shift (1/2, 1/2, 1/2) folded into the translation
# (numerators/denominators are kept unreduced, as generated).  Each
# operation is stored as (rotation, trans_num, trans_den).
transformations = []
for rot_elements, num, den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,3,1), (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,3), (4,4,4)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,-1), (0,1,0), (1,2,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,1,0), (1,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,3,1), (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,3), (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,-3,-1), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,-1,-3), (4,4,4)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,-1,0), (1,2,1)),
    ((1,0,0,0,1,0,0,0,-1), (0,-1,0), (1,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-3,-1), (4,4,4)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,-3), (4,4,4)),
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1), (3,5,3), (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (3,3,5), (4,4,4)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,1,2)),
    ((0,1,0,1,0,0,0,0,-1), (3,5,3), (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (3,3,5), (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1), (1,-1,1), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (1,1,-1), (4,4,4)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (1,0,1), (2,1,2)),
    ((1,0,0,0,1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (1,-1,1), (4,4,4)),
    ((0,1,0,1,0,0,0,0,1), (1,1,-1), (4,4,4)),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
# Space group 142 (I 41/a c d :2).
# Each symmetry operation is stored as (rotation, trans_num, trans_den):
# a 3x3 integer rotation matrix and an exact rational translation
# trans_num/trans_den (component-wise).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-3,-1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-3], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-3,-3], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,5], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [3,5,5], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,3], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,-1,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,-1], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,-1,-1], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [4,4,4]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
# Space group 143 (P 3): identity plus the two three-fold rotations.
# Operations are (rotation, trans_num, trans_den) with exact rational
# translations trans_num/trans_den.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
# Space group 144 (P 31): three-fold screw axis; rotations carry
# translations of 1/3 and 2/3 along c.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
# Space group 145 (P 32): three-fold screw axis; rotations carry
# translations of 2/3 and 1/3 along c (the mirror image of P 31).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
# Space group 146 (R 3 :H, hexagonal setting): the three point operations
# combined with the rhombohedral centering vectors (0,0,0), (1/3,2/3,2/3)
# and (2/3,1/3,1/3). Loop order reproduces the original listing: all
# operations at one centering before the next.
transformations = []
for num, den in [([0,0,0], [1,1,1]),
                 ([1,2,2], [3,3,3]),
                 ([2,1,1], [3,3,3])]:
    for rot_elems in [[1,0,0,0,1,0,0,0,1],
                      [0,-1,0,1,-1,0,0,0,1],
                      [-1,1,0,-1,0,0,0,0,1]]:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
# Space group 147 (P -3): the three proper rotations of P 3 plus their
# inversion-related counterparts; all translations are zero.
transformations = []
for rot_elems in [[1,0,0,0,1,0,0,0,1],
                  [0,-1,0,1,-1,0,0,0,1],
                  [-1,1,0,-1,0,0,0,0,1],
                  [-1,0,0,0,-1,0,0,0,-1],
                  [0,1,0,-1,1,0,0,0,-1],
                  [1,-1,0,1,0,0,0,0,-1]]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
# Space group 148 (R -3 :H, hexagonal setting): the six point operations
# of P -3 combined with the rhombohedral centering vectors (0,0,0),
# (1/3,2/3,2/3) and (2/3,1/3,1/3), in the original listing order.
transformations = []
for num, den in [([0,0,0], [1,1,1]),
                 ([1,2,2], [3,3,3]),
                 ([2,1,1], [3,3,3])]:
    for rot_elems in [[1,0,0,0,1,0,0,0,1],
                      [0,-1,0,1,-1,0,0,0,1],
                      [-1,1,0,-1,0,0,0,0,1],
                      [-1,0,0,0,-1,0,0,0,-1],
                      [0,1,0,-1,1,0,0,0,-1],
                      [1,-1,0,1,0,0,0,0,-1]]:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
# Space group 149 (P 3 1 2): three-fold rotations plus two-fold axes;
# all translations are zero.
transformations = []
for rot_elems in [[1,0,0,0,1,0,0,0,1],
                  [0,-1,0,1,-1,0,0,0,1],
                  [-1,1,0,-1,0,0,0,0,1],
                  [0,-1,0,-1,0,0,0,0,-1],
                  [-1,1,0,0,1,0,0,0,-1],
                  [1,0,0,1,-1,0,0,0,-1]]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
# Space group 150 (P 3 2 1): three-fold rotations plus two-fold axes
# (different orientation than P 3 1 2); all translations are zero.
transformations = []
for rot_elems in [[1,0,0,0,1,0,0,0,1],
                  [0,-1,0,1,-1,0,0,0,1],
                  [-1,1,0,-1,0,0,0,0,1],
                  [1,-1,0,0,-1,0,0,0,-1],
                  [-1,0,0,-1,1,0,0,0,-1],
                  [0,1,0,1,0,0,0,0,-1]]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
# Space group 151 (P 31 1 2): 3_1 screw rotations and two-fold axes with
# the associated fractional translations along c.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
# Space group 152 (P 31 2 1): 3_1 screw rotations and two-fold axes with
# the associated fractional translations along c.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
# Space group 153 (P 32 1 2): 3_2 screw rotations and two-fold axes with
# the associated fractional translations along c.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
# Space group 154 (P 32 2 1): 3_2 screw rotations and two-fold axes with
# the associated fractional translations along c.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
# Space group 155 (R 3 2 :H, hexagonal setting): the six point operations
# of P 3 2 1 combined with the rhombohedral centering vectors (0,0,0),
# (1/3,2/3,2/3) and (2/3,1/3,1/3), in the original listing order.
transformations = []
for num, den in [([0,0,0], [1,1,1]),
                 ([1,2,2], [3,3,3]),
                 ([2,1,1], [3,3,3])]:
    for rot_elems in [[1,0,0,0,1,0,0,0,1],
                      [0,-1,0,1,-1,0,0,0,1],
                      [-1,1,0,-1,0,0,0,0,1],
                      [1,-1,0,0,-1,0,0,0,-1],
                      [-1,0,0,-1,1,0,0,0,-1],
                      [0,1,0,1,0,0,0,0,-1]]:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
# Space group 156 (P 3 m 1): three-fold rotations plus mirror planes;
# all translations are zero.
transformations = []
for rot_elems in [[1,0,0,0,1,0,0,0,1],
                  [0,-1,0,1,-1,0,0,0,1],
                  [-1,1,0,-1,0,0,0,0,1],
                  [-1,1,0,0,1,0,0,0,1],
                  [1,0,0,1,-1,0,0,0,1],
                  [0,-1,0,-1,0,0,0,0,1]]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
# Space group 157 (P 3 1 m): three-fold rotations plus mirror planes
# (different orientation than P 3 m 1); all translations are zero.
transformations = []
for rot_elems in [[1,0,0,0,1,0,0,0,1],
                  [0,-1,0,1,-1,0,0,0,1],
                  [-1,1,0,-1,0,0,0,0,1],
                  [0,1,0,1,0,0,0,0,1],
                  [1,-1,0,0,-1,0,0,0,1],
                  [-1,0,0,-1,1,0,0,0,1]]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
# Space group 158 (P 3 c 1): three-fold rotations plus c-glide planes;
# the glide operations carry a translation of 1/2 along c.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
# Space group 159 (P 3 1 c): three-fold rotations plus c-glide planes
# (different orientation than P 3 c 1); the glide operations carry a
# translation of 1/2 along c.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
# Space group 160 (R 3 m :H, hexagonal setting): the six point operations
# of P 3 m 1 combined with the rhombohedral centering vectors (0,0,0),
# (1/3,2/3,2/3) and (2/3,1/3,1/3), in the original listing order.
transformations = []
for num, den in [([0,0,0], [1,1,1]),
                 ([1,2,2], [3,3,3]),
                 ([2,1,1], [3,3,3])]:
    for rot_elems in [[1,0,0,0,1,0,0,0,1],
                      [0,-1,0,1,-1,0,0,0,1],
                      [-1,1,0,-1,0,0,0,0,1],
                      [-1,1,0,0,1,0,0,0,1],
                      [1,0,0,1,-1,0,0,0,1],
                      [0,-1,0,-1,0,0,0,0,1]]:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
# Space group 161 (R 3 c :H, hexagonal setting): rotations and c-glide
# operations at each rhombohedral centering. Unlike R 3 m, the glide
# translations differ per centering (the 1/2 c-glide adds to each
# centering vector), so the full (rotation, num, den) list is spelled out.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,7], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,7], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,7], [3,3,6]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,5], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,5], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,5], [3,3,6]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
# Space group 162, Hermann-Mauguin symbol 'P -3 1 m'.
# Each row is (flattened 3x3 rotation matrix, translation numerator,
# translation denominator); registered under both number and symbol.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
# Space group 163, Hermann-Mauguin symbol 'P -3 1 c'.
# Each row is (flattened 3x3 rotation matrix, translation numerator,
# translation denominator); registered under both number and symbol.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
# Space group 164, Hermann-Mauguin symbol 'P -3 m 1'.
# All twelve operations are pure rotations (zero translation).
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
# Space group 165, Hermann-Mauguin symbol 'P -3 c 1'.
# Each row is (flattened 3x3 rotation matrix, translation numerator,
# translation denominator); registered under both number and symbol.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
# Space group 166, Hermann-Mauguin symbol 'R -3 m :H' (hexagonal setting).
# Twelve point-group rotations, each repeated with the three centering
# translations 0, (1/3,2/3,2/3) and (2/3,1/3,1/3), in that order.
transformations = []
for num, den in [([0,0,0], [1,1,1]),
                 ([1,2,2], [3,3,3]),
                 ([2,1,1], [3,3,3])]:
    for elems in [
        [1,0,0,0,1,0,0,0,1],
        [0,-1,0,1,-1,0,0,0,1],
        [-1,1,0,-1,0,0,0,0,1],
        [1,-1,0,0,-1,0,0,0,-1],
        [-1,0,0,-1,1,0,0,0,-1],
        [0,1,0,1,0,0,0,0,-1],
        [-1,0,0,0,-1,0,0,0,-1],
        [0,1,0,-1,1,0,0,0,-1],
        [1,-1,0,1,0,0,0,0,-1],
        [-1,1,0,0,1,0,0,0,1],
        [1,0,0,1,-1,0,0,0,1],
        [0,-1,0,-1,0,0,0,0,1],
    ]:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
# Space group 167, Hermann-Mauguin symbol 'R -3 c :H' (hexagonal setting).
# Each row is (flattened 3x3 rotation matrix, translation numerator,
# translation denominator); the same twelve rotations appear three times
# with the centering translations folded into num/den.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [1,2,7], [3,3,6]),
    ([-1,0,0,-1,1,0,0,0,-1], [1,2,7], [3,3,6]),
    ([0,1,0,1,0,0,0,0,-1], [1,2,7], [3,3,6]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,1], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,1], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,1], [3,3,6]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [2,1,5], [3,3,6]),
    ([-1,0,0,-1,1,0,0,0,-1], [2,1,5], [3,3,6]),
    ([0,1,0,1,0,0,0,0,-1], [2,1,5], [3,3,6]),
    ([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,-1], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,-1], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,-1], [3,3,6]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
# Space group 168, Hermann-Mauguin symbol 'P 6'.
# All six operations are pure rotations (zero translation).
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
# Space group 169, Hermann-Mauguin symbol 'P 61' (sixfold screw axis).
# Each row is (flattened 3x3 rotation matrix, translation numerator,
# translation denominator).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
# Space group 170, Hermann-Mauguin symbol 'P 65' (sixfold screw axis).
# Each row is (flattened 3x3 rotation matrix, translation numerator,
# translation denominator).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
# Space group 171, Hermann-Mauguin symbol 'P 62' (sixfold screw axis).
# Each row is (flattened 3x3 rotation matrix, translation numerator,
# translation denominator).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
# Space group 172, Hermann-Mauguin symbol 'P 64' (sixfold screw axis).
# Each row is (flattened 3x3 rotation matrix, translation numerator,
# translation denominator).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
# Space group 173, Hermann-Mauguin symbol 'P 63' (sixfold screw axis).
# Each row is (flattened 3x3 rotation matrix, translation numerator,
# translation denominator).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
# Space group 174, Hermann-Mauguin symbol 'P -6'.
# All six operations are pure rotations/rotoinversions (zero translation).
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
# Space group 175, Hermann-Mauguin symbol 'P 6/m'.
# All twelve operations are pure rotations (zero translation).
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [1,0,0,0,1,0,0,0,-1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
# Space group 176, Hermann-Mauguin symbol 'P 63/m'.
# Each row is (flattened 3x3 rotation matrix, translation numerator,
# translation denominator).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
# Space group 177 ('P 6 2 2').  All 12 operations are pure rotations:
# every translation part is zero (num [0,0,0] over den [1,1,1]).
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    # Fresh zero-translation arrays per operation, as in the expanded form.
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
# Space group 178 ('P 61 2 2').  Each symmetry operation is a
# (rotation matrix, translation numerator, translation denominator)
# triple; the screw translations are sixths/thirds/halves along c.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],    [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1],    [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1],  [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,5], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,1], [1,1,6]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
# Space group 179 ('P 65 2 2').  Each symmetry operation is a
# (rotation matrix, translation numerator, translation denominator)
# triple; 179 is the enantiomorph of 178 (screw senses reversed).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],    [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1],    [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1],  [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,5], [1,1,6]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
# Space group 180 ('P 62 2 2').  Each symmetry operation is a
# (rotation matrix, translation numerator, translation denominator)
# triple; screw translations are thirds along c.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],    [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1],    [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1],  [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,1], [1,1,3]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
# Space group 181 ('P 64 2 2').  Each symmetry operation is a
# (rotation matrix, translation numerator, translation denominator)
# triple; 181 is the enantiomorph of 180 (screw senses reversed).
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],    [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1],    [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1],  [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,2], [1,1,3]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
# Space group 182 ('P 63 2 2').  Each symmetry operation is a
# (rotation matrix, translation numerator, translation denominator)
# triple; the 6_3 screw contributes half-cell shifts along c.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],    [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
# Space group 183 ('P 6 m m').  All 12 operations are pure rotations /
# reflections: every translation part is zero.
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    # Fresh zero-translation arrays per operation, as in the expanded form.
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
# Space group 184 ('P 6 c c').  Each symmetry operation is a
# (rotation matrix, translation numerator, translation denominator)
# triple; the glide operations carry a half-cell shift along c.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
# Space group 185 ('P 63 c m').  Each symmetry operation is a
# (rotation matrix, translation numerator, translation denominator)
# triple; screw/glide operations carry a half-cell shift along c.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,0], [1,1,1]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
# Space group 186 ('P 63 m c').  Each symmetry operation is a
# (rotation matrix, translation numerator, translation denominator)
# triple; screw/glide operations carry a half-cell shift along c.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],  [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
# Space group 187 ('P -6 m 2').  All 12 operations are point-group
# operations: every translation part is zero.
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    # Fresh zero-translation arrays per operation, as in the expanded form.
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
# Space group 188 ('P -6 c 2').  Each symmetry operation is a
# (rotation matrix, translation numerator, translation denominator)
# triple; the c-glide operations carry a half-cell shift along c.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],    [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
# Space group 189 ('P -6 2 m').  All 12 operations are point-group
# operations: every translation part is zero.
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
    [1,0,0,0,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    # Fresh zero-translation arrays per operation, as in the expanded form.
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
# Space group 190 ('P -6 2 c').  Each symmetry operation is a
# (rotation matrix, translation numerator, translation denominator)
# triple; the c-glide operations carry a half-cell shift along c.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],    [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],   [0,0,1], [1,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
# Space group 191, P 6/m m m (hexagonal, symmorphic): 24 point-group
# operations, all with zero translation.  Each entry below is a rotation
# matrix written as a flat row-major 3x3 tuple.
for elems in [
    (1,0,0, 0,1,0, 0,0,1),
    (1,-1,0, 1,0,0, 0,0,1),
    (0,1,0, -1,1,0, 0,0,1),
    (0,-1,0, 1,-1,0, 0,0,1),
    (-1,1,0, -1,0,0, 0,0,1),
    (1,-1,0, 0,-1,0, 0,0,-1),
    (-1,0,0, -1,1,0, 0,0,-1),
    (-1,0,0, 0,-1,0, 0,0,1),
    (0,1,0, 1,0,0, 0,0,-1),
    (0,-1,0, -1,0,0, 0,0,-1),
    (-1,1,0, 0,1,0, 0,0,-1),
    (1,0,0, 1,-1,0, 0,0,-1),
    (-1,0,0, 0,-1,0, 0,0,-1),
    (-1,1,0, -1,0,0, 0,0,-1),
    (0,-1,0, 1,-1,0, 0,0,-1),
    (0,1,0, -1,1,0, 0,0,-1),
    (1,-1,0, 1,0,0, 0,0,-1),
    (-1,1,0, 0,1,0, 0,0,1),
    (1,0,0, 1,-1,0, 0,0,1),
    (1,0,0, 0,1,0, 0,0,-1),
    (0,-1,0, -1,0,0, 0,0,1),
    (0,1,0, 1,0,0, 0,0,1),
    (1,-1,0, 0,-1,0, 0,0,1),
    (-1,0,0, -1,1,0, 0,0,1),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    # Fractional translation = trans_num / trans_den, elementwise (here 0).
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
transformations = []
# Space group 192, P 6/m c c (hexagonal): 24 operations.  Each entry is
# (rotation as a flat row-major 3x3 tuple, translation numerators,
# translation denominators); the fractional translation is num/den.
for elems, num, den in [
    ((1,0,0, 0,1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0, 1,0,0, 0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0, -1,1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0, 1,-1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((-1,1,0, -1,0,0, 0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0, 0,-1,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0, -1,1,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0, 0,-1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0, 1,0,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0, -1,0,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((-1,1,0, 0,1,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((1,0,0, 1,-1,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0, 0,-1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((-1,1,0, -1,0,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0, 1,-1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0, -1,1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((1,-1,0, 1,0,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((-1,1,0, 0,1,0, 0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0, 1,-1,0, 0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0, 0,1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,1), (0,0,-1), (1,1,2)),
    ((0,1,0, 1,0,0, 0,0,1), (0,0,-1), (1,1,2)),
    ((1,-1,0, 0,-1,0, 0,0,1), (0,0,-1), (1,1,2)),
    ((-1,0,0, -1,1,0, 0,0,1), (0,0,-1), (1,1,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
transformations = []
# Space group 193, P 63/m c m (hexagonal): 24 operations.  Each entry is
# (rotation as a flat row-major 3x3 tuple, translation numerators,
# translation denominators); the fractional translation is num/den.
for elems, num, den in [
    ((1,0,0, 0,1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0, 1,0,0, 0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0, -1,1,0, 0,0,1), (0,0,1), (1,1,2)),
    ((0,-1,0, 1,-1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((-1,1,0, -1,0,0, 0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0, 0,-1,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0, -1,1,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0, 0,-1,0, 0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0, 1,0,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0, -1,0,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((-1,1,0, 0,1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((1,0,0, 1,-1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((-1,1,0, -1,0,0, 0,0,-1), (0,0,-1), (1,1,2)),
    ((0,-1,0, 1,-1,0, 0,0,-1), (0,0,-1), (1,1,2)),
    ((0,1,0, -1,1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((1,-1,0, 1,0,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((-1,1,0, 0,1,0, 0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0, 1,-1,0, 0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0, 0,1,0, 0,0,-1), (0,0,-1), (1,1,2)),
    ((0,-1,0, -1,0,0, 0,0,1), (0,0,-1), (1,1,2)),
    ((0,1,0, 1,0,0, 0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0, 0,-1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0, -1,1,0, 0,0,1), (0,0,0), (1,1,1)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
transformations = []
# Space group 194, P 63/m m c (hexagonal): 24 operations.  Each entry is
# (rotation as a flat row-major 3x3 tuple, translation numerators,
# translation denominators); the fractional translation is num/den.
for elems, num, den in [
    ((1,0,0, 0,1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0, 1,0,0, 0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0, -1,1,0, 0,0,1), (0,0,1), (1,1,2)),
    ((0,-1,0, 1,-1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((-1,1,0, -1,0,0, 0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0, 0,-1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0, -1,1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0, 0,-1,0, 0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0, 1,0,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0, -1,0,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((-1,1,0, 0,1,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((1,0,0, 1,-1,0, 0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0, 0,-1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((-1,1,0, -1,0,0, 0,0,-1), (0,0,-1), (1,1,2)),
    ((0,-1,0, 1,-1,0, 0,0,-1), (0,0,-1), (1,1,2)),
    ((0,1,0, -1,1,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((1,-1,0, 1,0,0, 0,0,-1), (0,0,0), (1,1,1)),
    ((-1,1,0, 0,1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0, 1,-1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0, 0,1,0, 0,0,-1), (0,0,-1), (1,1,2)),
    ((0,-1,0, -1,0,0, 0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0, 1,0,0, 0,0,1), (0,0,-1), (1,1,2)),
    ((1,-1,0, 0,-1,0, 0,0,1), (0,0,-1), (1,1,2)),
    ((-1,0,0, -1,1,0, 0,0,1), (0,0,-1), (1,1,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
transformations = []
# Space group 195, P 2 3 (cubic, symmorphic): the 12 rotations of point
# group 23, all with zero translation.  Each entry is a rotation matrix
# written as a flat row-major 3x3 tuple.
for elems in [
    (1,0,0, 0,1,0, 0,0,1),
    (0,0,1, 1,0,0, 0,1,0),
    (0,1,0, 0,0,1, 1,0,0),
    (0,-1,0, 0,0,-1, 1,0,0),
    (0,0,1, -1,0,0, 0,-1,0),
    (0,-1,0, 0,0,1, -1,0,0),
    (0,0,-1, -1,0,0, 0,1,0),
    (0,0,-1, 1,0,0, 0,-1,0),
    (0,1,0, 0,0,-1, -1,0,0),
    (1,0,0, 0,-1,0, 0,0,-1),
    (-1,0,0, 0,1,0, 0,0,-1),
    (-1,0,0, 0,-1,0, 0,0,1),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    # Fractional translation = trans_num / trans_den, elementwise (here 0).
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[195] = sg
space_groups['P 2 3'] = sg
transformations = []
# Space group 196, F 2 3 (face-centered cubic): the 12 rotations of point
# group 23 combined with the four F-lattice centering translations,
# giving 48 operations in the same order as the generated listing
# (all rotations at one centering before moving to the next).
_rotations_23 = [
    (1,0,0, 0,1,0, 0,0,1),
    (0,0,1, 1,0,0, 0,1,0),
    (0,1,0, 0,0,1, 1,0,0),
    (0,-1,0, 0,0,-1, 1,0,0),
    (0,0,1, -1,0,0, 0,-1,0),
    (0,-1,0, 0,0,1, -1,0,0),
    (0,0,-1, -1,0,0, 0,1,0),
    (0,0,-1, 1,0,0, 0,-1,0),
    (0,1,0, 0,0,-1, -1,0,0),
    (1,0,0, 0,-1,0, 0,0,-1),
    (-1,0,0, 0,1,0, 0,0,-1),
    (-1,0,0, 0,-1,0, 0,0,1),
]
# Centerings (0,0,0), (0,1/2,1/2), (1/2,0,1/2), (1/2,1/2,0) as
# (numerators, denominators) pairs.
_centerings = [
    ((0,0,0), (1,1,1)),
    ((0,1,1), (1,2,2)),
    ((1,0,1), (2,1,2)),
    ((1,1,0), (2,2,1)),
]
for num, den in _centerings:
    for elems in _rotations_23:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[196] = sg
space_groups['F 2 3'] = sg
transformations = []
# Space group 197, I 2 3 (body-centered cubic): the 12 rotations of point
# group 23 combined with the two I-lattice centering translations,
# giving 24 operations in the same order as the generated listing
# (all rotations at one centering before moving to the next).
_rotations_23 = [
    (1,0,0, 0,1,0, 0,0,1),
    (0,0,1, 1,0,0, 0,1,0),
    (0,1,0, 0,0,1, 1,0,0),
    (0,-1,0, 0,0,-1, 1,0,0),
    (0,0,1, -1,0,0, 0,-1,0),
    (0,-1,0, 0,0,1, -1,0,0),
    (0,0,-1, -1,0,0, 0,1,0),
    (0,0,-1, 1,0,0, 0,-1,0),
    (0,1,0, 0,0,-1, -1,0,0),
    (1,0,0, 0,-1,0, 0,0,-1),
    (-1,0,0, 0,1,0, 0,0,-1),
    (-1,0,0, 0,-1,0, 0,0,1),
]
# Centerings (0,0,0) and (1/2,1/2,1/2) as (numerators, denominators) pairs.
_centerings = [
    ((0,0,0), (1,1,1)),
    ((1,1,1), (2,2,2)),
]
for num, den in _centerings:
    for elems in _rotations_23:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[197] = sg
space_groups['I 2 3'] = sg
transformations = []
# Space group 198, P 21 3 (cubic, non-symmorphic): 12 operations with
# screw-axis translations.  Each entry is (rotation as a flat row-major
# 3x3 tuple, translation numerators, translation denominators); the
# fractional translation is num/den.
for elems, num, den in [
    ((1,0,0, 0,1,0, 0,0,1), (0,0,0), (1,1,1)),
    ((0,0,1, 1,0,0, 0,1,0), (0,0,0), (1,1,1)),
    ((0,1,0, 0,0,1, 1,0,0), (0,0,0), (1,1,1)),
    ((0,-1,0, 0,0,-1, 1,0,0), (1,0,1), (2,1,2)),
    ((0,0,1, -1,0,0, 0,-1,0), (1,1,0), (2,2,1)),
    ((0,-1,0, 0,0,1, -1,0,0), (0,1,1), (1,2,2)),
    ((0,0,-1, -1,0,0, 0,1,0), (1,0,1), (2,1,2)),
    ((0,0,-1, 1,0,0, 0,-1,0), (0,1,1), (1,2,2)),
    ((0,1,0, 0,0,-1, -1,0,0), (1,1,0), (2,2,1)),
    ((1,0,0, 0,-1,0, 0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0, 0,1,0, 0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0, 0,-1,0, 0,0,1), (1,0,1), (2,1,2)),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[198] = sg
space_groups['P 21 3'] = sg
# Space group No. 199 ('I 21 3').
# Operations are stored as (3x3 rotation matrix, translation numerator,
# translation denominator) tuples; num/den arrays are kept exactly as
# in the generated table (values are not reduced modulo 1).
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0],   [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],   [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0],   [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0],   [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
]:
    mat = N.array(rows)
    mat.shape = (3, 3)
    transformations.append((mat, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
# Space group No. 200 ('P m -3'): 24 point-group operations, all with
# zero fractional translation.
transformations = []
for rows in [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]:
    mat = N.array(rows)
    mat.shape = (3, 3)
    # Zero translation, expressed as numerator [0,0,0] over
    # denominator [1,1,1] like every other entry in this table.
    transformations.append((mat, N.array([0,0,0]), N.array([1,1,1])))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
# Space group No. 201 ('P n -3 :2', origin choice 2).
# Operations are (3x3 rotation matrix, translation numerator,
# translation denominator) tuples; negative numerators are preserved
# exactly as generated (translations are not normalized into [0,1)).
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,0,1,1,0,0,0,1,0],    [0,0,0],    [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],    [0,0,0],    [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],  [1,1,0],    [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0],  [0,1,1],    [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0],  [1,0,1],    [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0],  [1,1,0],    [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0],  [1,0,1],    [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0],  [0,1,1],    [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],    [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0],    [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0],    [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0],   [-1,-1,0],  [2,2,1]),
    ([0,0,-1,1,0,0,0,1,0],   [0,-1,-1],  [1,2,2]),
    ([0,1,0,0,0,-1,1,0,0],   [-1,0,-1],  [2,1,2]),
    ([0,0,1,1,0,0,0,-1,0],   [-1,-1,0],  [2,2,1]),
    ([0,0,1,-1,0,0,0,1,0],   [-1,0,-1],  [2,1,2]),
    ([0,-1,0,0,0,1,1,0,0],   [0,-1,-1],  [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0],  [2,2,1]),
]:
    mat = N.array(rows)
    mat.shape = (3, 3)
    transformations.append((mat, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
# Space group No. 202 ('F m -3'): 96 operations, generated as the 24
# point-group operations of m-3 combined with the four face-centring
# translations (0,0,0), (0,1/2,1/2), (1/2,0,1/2), (1/2,1/2,0).
# The original table lists the full outer product (all 24 rotations per
# centring, centrings in the order below), which the nested loop
# reproduces element for element.
transformations = []
_rotations_202 = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
# Face-centring translations as (numerator, denominator) pairs.
_centrings_202 = [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]
for num, den in _centrings_202:
    for rows in _rotations_202:
        mat = N.array(rows)
        mat.shape = (3, 3)
        # Fresh arrays per operation, matching the original's layout.
        transformations.append((mat, N.array(num), N.array(den)))
# Register under both the IT number and the Hermann-Mauguin symbol.
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
# Start of the next generated space-group entry (presumably IT No. 203,
# the successor of 202 in this table — confirm against the full file;
# its SpaceGroup(...) registration lies beyond this chunk).
# Each operation: a flat 9-element array reshaped to a 3x3 rotation
# matrix, plus a fractional translation stored as integer
# numerator/denominator arrays.
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
# Operations with quarter-cell translations (denominators of 4).
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
# Inversion-related operations; negative numerators are kept as
# generated (translations are not normalized into [0,1)).
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
# Second batch: the same rotations shifted by a centring-like
# translation (0,1/2,1/2).
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
# NOTE(review): entry continues past this chunk — the append for the
# rotation/translation set below occurs on the following (unseen) lines.
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
# --- Space group 204, 'I m -3' ------------------------------------------------
# The 24 rotations below appear twice in the original table: once with a zero
# translation and once shifted by the body-centering vector (1/2, 1/2, 1/2),
# giving 48 symmetry operations in the original order.
transformations = []
_rotations_204 = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
for _num, _den in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for _rot in _rotations_204:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
# --- Space group 205, 'P a -3' --------------------------------------------------
# 24 symmetry operations, one table row per operation:
# (row-major 3x3 rotation, translation numerator, translation denominator).
# The fractional translation is trans_num/trans_den.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],    [1,1,1]),
    ([0,0,1,1,0,0,0,1,0],     [0,0,0],    [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],     [0,0,0],    [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],   [1,0,1],    [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0],   [1,1,0],    [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0],   [0,1,1],    [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0],   [1,0,1],    [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0],   [0,1,1],    [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0],   [1,1,0],    [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,0],    [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [0,1,1],    [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,0,1],    [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],    [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0],  [0,0,0],    [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0],  [0,0,0],    [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0],    [-1,0,-1],  [2,1,2]),
    ([0,0,-1,1,0,0,0,1,0],    [-1,-1,0],  [2,2,1]),
    ([0,1,0,0,0,-1,1,0,0],    [0,-1,-1],  [1,2,2]),
    ([0,0,1,1,0,0,0,-1,0],    [-1,0,-1],  [2,1,2]),
    ([0,0,1,-1,0,0,0,1,0],    [0,-1,-1],  [1,2,2]),
    ([0,-1,0,0,0,1,1,0,0],    [-1,-1,0],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],    [-1,-1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],    [0,-1,-1],  [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],    [-1,0,-1],  [2,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
# --- Space group 206, 'I a -3' --------------------------------------------------
# 48 symmetry operations (24 primitive operations followed by the same
# rotations shifted by the body-centering vector), one table row per
# operation: (row-major 3x3 rotation, translation numerator, denominator).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],   [1,1,1]),
    ([0,0,1,1,0,0,0,1,0],     [0,0,0],   [1,1,1]),
    ([0,1,0,0,0,1,1,0,0],     [0,0,0],   [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0],   [0,1,0],   [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0],   [0,0,1],   [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0],   [1,0,0],   [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0],   [0,1,0],   [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0],   [1,0,0],   [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0],   [0,0,1],   [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1],   [0,0,1],   [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,0,0],   [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,1,0],   [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],   [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0],  [0,0,0],   [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0],  [0,0,0],   [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0],    [0,-1,0],  [1,2,1]),
    ([0,0,-1,1,0,0,0,1,0],    [0,0,-1],  [1,1,2]),
    ([0,1,0,0,0,-1,1,0,0],    [-1,0,0],  [2,1,1]),
    ([0,0,1,1,0,0,0,-1,0],    [0,-1,0],  [1,2,1]),
    ([0,0,1,-1,0,0,0,1,0],    [-1,0,0],  [2,1,1]),
    ([0,-1,0,0,0,1,1,0,0],    [0,0,-1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1],    [0,0,-1],  [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],    [-1,0,0],  [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],    [0,-1,0],  [1,2,1]),
    ([1,0,0,0,1,0,0,0,1],     [1,1,1],   [2,2,2]),
    ([0,0,1,1,0,0,0,1,0],     [1,1,1],   [2,2,2]),
    ([0,1,0,0,0,1,1,0,0],     [1,1,1],   [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0],   [1,1,1],   [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0],   [1,1,1],   [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0],   [1,1,1],   [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0],   [1,1,1],   [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0],   [1,1,1],   [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0],   [1,1,1],   [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1],   [1,1,1],   [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],   [1,1,1],   [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [1,1,1],   [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1],  [1,1,1],   [2,2,2]),
    ([0,0,-1,-1,0,0,0,-1,0],  [1,1,1],   [2,2,2]),
    ([0,-1,0,0,0,-1,-1,0,0],  [1,1,1],   [2,2,2]),
    ([0,1,0,0,0,1,-1,0,0],    [1,0,1],   [2,1,2]),
    ([0,0,-1,1,0,0,0,1,0],    [1,1,0],   [2,2,1]),
    ([0,1,0,0,0,-1,1,0,0],    [0,1,1],   [1,2,2]),
    ([0,0,1,1,0,0,0,-1,0],    [1,0,1],   [2,1,2]),
    ([0,0,1,-1,0,0,0,1,0],    [0,1,1],   [1,2,2]),
    ([0,-1,0,0,0,1,1,0,0],    [1,1,0],   [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1],    [1,1,0],   [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],    [0,1,1],   [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],    [1,0,1],   [2,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
# Space group 207, 'P 4 3 2': the 24 proper rotations of the octahedral
# point group 432, each with zero translation.  The operations are kept in
# the file's canonical order; each entry is (rot, trans_num, trans_den)
# with rot a 3x3 integer matrix and the translation given as the
# fraction trans_num/trans_den per axis.
transformations = []
_rotations_432 = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
]
for _r in _rotations_432:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
# Space group 208, 'P 42 3 2': the 24 rotations of point group 432.  The
# fourfold operations (entries 2-7) and the diagonal twofolds (entries
# 19-24) carry a (1/2,1/2,1/2) screw translation; the identity, the
# threefolds and the axial twofolds carry none.  Entry order and array
# contents match the canonical table exactly.
transformations = []
_half = ([1,1,1], [2,2,2])
_zero = ([0,0,0], [1,1,1])
_ops_208 = [
    ([1,0,0,0,1,0,0,0,1], _zero),
    ([1,0,0,0,0,-1,0,1,0], _half),
    ([1,0,0,0,0,1,0,-1,0], _half),
    ([0,0,1,0,1,0,-1,0,0], _half),
    ([0,0,-1,0,1,0,1,0,0], _half),
    ([0,-1,0,1,0,0,0,0,1], _half),
    ([0,1,0,-1,0,0,0,0,1], _half),
    ([0,0,1,1,0,0,0,1,0], _zero),
    ([0,1,0,0,0,1,1,0,0], _zero),
    ([0,-1,0,0,0,-1,1,0,0], _zero),
    ([0,0,1,-1,0,0,0,-1,0], _zero),
    ([0,-1,0,0,0,1,-1,0,0], _zero),
    ([0,0,-1,-1,0,0,0,1,0], _zero),
    ([0,0,-1,1,0,0,0,-1,0], _zero),
    ([0,1,0,0,0,-1,-1,0,0], _zero),
    ([1,0,0,0,-1,0,0,0,-1], _zero),
    ([-1,0,0,0,1,0,0,0,-1], _zero),
    ([-1,0,0,0,-1,0,0,0,1], _zero),
    ([0,1,0,1,0,0,0,0,-1], _half),
    ([0,-1,0,-1,0,0,0,0,-1], _half),
    ([0,0,1,0,-1,0,1,0,0], _half),
    ([0,0,-1,0,-1,0,-1,0,0], _half),
    ([-1,0,0,0,0,1,0,1,0], _half),
    ([-1,0,0,0,0,-1,0,-1,0], _half),
]
for _r, (_num, _den) in _ops_208:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
# Space group 209, 'F 4 3 2': the 24 rotations of point group 432 combined
# with the four face-centring translations (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0), giving 96 operations.  Ordering is
# centring-major (all 24 rotations at one centring before the next),
# matching the canonical table exactly.
transformations = []
_rotations_432 = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
]
_centrings_F = [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]
for _num, _den in _centrings_F:
    for _r in _rotations_432:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
# Space group 211, 'I 4 3 2'.
# Generated symmetry data: the 24 proper rotations of point group 432
# (row-major 3x3 entries) are instantiated twice — first with zero
# translation, then shifted by the I-centring vector (1/2, 1/2, 1/2).
# Translations are kept as separate integer numerator/denominator arrays,
# matching the (rot, trans_num, trans_den) layout used throughout the file.
transformations = []
_rotations_211 = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
]
# Append order matches the original table: all 24 operators at (0,0,0),
# then the same 24 at the centring translation.
for _tn, _td in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for _r in _rotations_211:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_tn)
        trans_den = N.array(_td)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
# Space group 212, 'P 43 3 2'.
# Generated symmetry data: each entry is (rotation as row-major 3x3 list,
# translation numerators, translation denominators), appended in the same
# order as the original generated table.
transformations = []
_operators_212 = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [3,3,1], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0], [1,3,3], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0], [1,3,3], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0], [3,1,3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1], [3,1,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,1], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0], [3,3,1], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0], [3,1,3], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [4,4,4]),
]
for _r, _tn, _td in _operators_212:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
# Space group 213, 'P 41 3 2'.
# Generated symmetry data: each entry is (rotation as row-major 3x3 list,
# translation numerators, translation denominators), appended in the same
# order as the original generated table.
transformations = []
_operators_213 = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [1,1,3], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0], [3,1,1], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0], [3,1,1], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0], [1,3,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [3,1,1], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,3], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0], [1,1,3], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [3,3,3], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0], [1,3,1], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0], [3,3,3], [4,4,4]),
]
for _r, _tn, _td in _operators_213:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
# Space group 214, 'I 41 3 2'.
# Generated symmetry data: 48 operators, the second 24 being the first 24
# shifted by the I-centring vector (1/2, 1/2, 1/2). The centred half is
# transcribed verbatim rather than computed, because the generated table
# stores unreduced fractions (e.g. numerator 5 over denominator 4).
# Each entry is (rotation as row-major 3x3 list, translation numerators,
# translation denominators), appended in the original order.
transformations = []
_operators_214 = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [1,1,3], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0], [1,3,3], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0], [1,3,3], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0], [1,3,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0], [1,1,3], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0], [1,3,1], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [4,4,4]),
    # I-centred copies of the 24 operators above.
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,-1,0,1,0], [3,3,5], [4,4,4]),
    ([1,0,0,0,0,1,0,-1,0], [3,5,5], [4,4,4]),
    ([0,0,1,0,1,0,-1,0,0], [3,5,5], [4,4,4]),
    ([0,0,-1,0,1,0,1,0,0], [3,5,3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,1], [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,5], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [3,5,5], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,3], [4,4,4]),
    ([0,0,1,0,-1,0,1,0,0], [3,3,5], [4,4,4]),
    ([0,0,-1,0,-1,0,-1,0,0], [3,3,3], [4,4,4]),
    ([-1,0,0,0,0,1,0,1,0], [3,5,3], [4,4,4]),
    ([-1,0,0,0,0,-1,0,-1,0], [3,3,3], [4,4,4]),
]
for _r, _tn, _td in _operators_214:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_tn)
    trans_den = N.array(_td)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(214, 'I 41 3 2', transformations)
space_groups[214] = sg
space_groups['I 41 3 2'] = sg
# Space group 215 ('P -4 3 m'): 24 symmetry operations, each a pure
# rotation (flattened 3x3 matrix below) with zero translation.
transformations = []
for elems in [
    [1,0,0,0,1,0,0,0,1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(215, 'P -4 3 m', transformations)
space_groups[215] = sg
space_groups['P -4 3 m'] = sg
# Space group 216 ('F -4 3 m'): the 24 rotations below combined with the
# four F-centering translations (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and
# (1/2,1/2,0), giving 96 operations.  The list is built centering-major
# (all 24 rotations at one centering before the next), matching the
# original expanded table ordering.
transformations = []
rotations = [
    [1,0,0,0,1,0,0,0,1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]
# (numerators, denominators) of each centering translation.
centerings = [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]
for num, den in centerings:
    for elems in rotations:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(216, 'F -4 3 m', transformations)
space_groups[216] = sg
space_groups['F -4 3 m'] = sg
# Space group 217 ('I -4 3 m'): the 24 rotations below combined with the
# two I-centering translations (0,0,0) and (1/2,1/2,1/2), giving 48
# operations.  Built centering-major to match the original expanded
# table ordering.
transformations = []
rotations = [
    [1,0,0,0,1,0,0,0,1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]
# (numerators, denominators) of each centering translation.
centerings = [
    ([0,0,0], [1,1,1]),
    ([1,1,1], [2,2,2]),
]
for num, den in centerings:
    for elems in rotations:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
# Space group 218 ('P -4 3 n'): 24 operations.  Each table row pairs a
# flattened 3x3 rotation matrix with a flag; operations flagged True
# carry a (1/2,1/2,1/2) translation, the others none — exactly the
# per-operation translations of the original expanded table.
transformations = []
for elems, shifted in [
    ([1,0,0,0,1,0,0,0,1],   False),
    ([-1,0,0,0,0,1,0,-1,0], True),
    ([-1,0,0,0,0,-1,0,1,0], True),
    ([0,0,-1,0,-1,0,1,0,0], True),
    ([0,0,1,0,-1,0,-1,0,0], True),
    ([0,1,0,-1,0,0,0,0,-1], True),
    ([0,-1,0,1,0,0,0,0,-1], True),
    ([0,0,1,1,0,0,0,1,0],   False),
    ([0,1,0,0,0,1,1,0,0],   False),
    ([0,-1,0,0,0,-1,1,0,0], False),
    ([0,0,1,-1,0,0,0,-1,0], False),
    ([0,-1,0,0,0,1,-1,0,0], False),
    ([0,0,-1,-1,0,0,0,1,0], False),
    ([0,0,-1,1,0,0,0,-1,0], False),
    ([0,1,0,0,0,-1,-1,0,0], False),
    ([1,0,0,0,-1,0,0,0,-1], False),
    ([-1,0,0,0,1,0,0,0,-1], False),
    ([-1,0,0,0,-1,0,0,0,1], False),
    ([0,-1,0,-1,0,0,0,0,1], True),
    ([0,1,0,1,0,0,0,0,1],   True),
    ([0,0,-1,0,1,0,-1,0,0], True),
    ([0,0,1,0,1,0,1,0,0],   True),
    ([1,0,0,0,0,-1,0,-1,0], True),
    ([1,0,0,0,0,1,0,1,0],   True),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    if shifted:
        trans_num = N.array([1,1,1])
        trans_den = N.array([2,2,2])
    else:
        trans_num = N.array([0,0,0])
        trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(218, 'P -4 3 n', transformations)
space_groups[218] = sg
space_groups['P -4 3 n'] = sg
# Space group 219 (F -4 3 c): build its 96 symmetry operations.
# Each table entry is (flattened 3x3 rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num/trans_den, component-wise.  The four 24-operation runs
# correspond to the F-centering translations (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0) applied to the base operations.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [1,0,0], [2,1,1]),
    ([-1,0,0,0,0,-1,0,1,0], [1,0,0], [2,1,1]),
    ([0,0,-1,0,-1,0,1,0,0], [1,0,0], [2,1,1]),
    ([0,0,1,0,-1,0,-1,0,0], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [1,0,0], [2,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,0,-1,0,1,0,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,1,0,1,0,1,0,0], [1,0,0], [2,1,1]),
    ([1,0,0,0,0,-1,0,-1,0], [1,0,0], [2,1,1]),
    ([1,0,0,0,0,1,0,1,0], [1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,0,1,0,-1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,-1,0,1,0], [1,1,1], [2,2,2]),
    ([0,0,-1,0,-1,0,1,0,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,-1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,1,1,0,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,1], [1,2,2]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,1], [1,2,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,0,-1,0,1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,1,0,1,0,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,-1,0,-1,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,1,0,1,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,0,1,0,-1,0], [1,0,1], [1,1,2]),
    ([-1,0,0,0,0,-1,0,1,0], [1,0,1], [1,1,2]),
    ([0,0,-1,0,-1,0,1,0,0], [1,0,1], [1,1,2]),
    ([0,0,1,0,-1,0,-1,0,0], [1,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,0,1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,0,1], [1,1,2]),
    ([0,0,1,1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,1,1,0,0], [1,0,1], [2,1,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [1,0,1], [1,1,2]),
    ([0,0,-1,0,1,0,-1,0,0], [1,0,1], [1,1,2]),
    ([0,0,1,0,1,0,1,0,0], [1,0,1], [1,1,2]),
    ([1,0,0,0,0,-1,0,-1,0], [1,0,1], [1,1,2]),
    ([1,0,0,0,0,1,0,1,0], [1,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,0,1,0,-1,0], [1,1,0], [1,2,1]),
    ([-1,0,0,0,0,-1,0,1,0], [1,1,0], [1,2,1]),
    ([0,0,-1,0,-1,0,1,0,0], [1,1,0], [1,2,1]),
    ([0,0,1,0,-1,0,-1,0,0], [1,1,0], [1,2,1]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,0], [1,2,1]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,0], [1,2,1]),
    ([0,0,1,1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,1,0,0,0,1,1,0,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,0], [2,2,1]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,0], [1,2,1]),
    ([0,0,-1,0,1,0,-1,0,0], [1,1,0], [1,2,1]),
    ([0,0,1,0,1,0,1,0,0], [1,1,0], [1,2,1]),
    ([1,0,0,0,0,-1,0,-1,0], [1,1,0], [1,2,1]),
    ([1,0,0,0,0,1,0,1,0], [1,1,0], [1,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(219, 'F -4 3 c', transformations)
space_groups[219] = sg
space_groups['F -4 3 c'] = sg
# Space group 220 (I -4 3 d): build its 48 symmetry operations.
# Each table entry is (flattened 3x3 rotation matrix, translation
# numerators, translation denominators); the fractional translation is
# trans_num/trans_den, component-wise.  The second 24-operation run is
# the first run shifted by the I-centering translation (1/2,1/2,1/2).
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [1,1,3], [4,4,4]),
    ([-1,0,0,0,0,-1,0,1,0], [1,3,3], [4,4,4]),
    ([0,0,-1,0,-1,0,1,0,0], [1,3,3], [4,4,4]),
    ([0,0,1,0,-1,0,-1,0,0], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,-1], [1,3,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,3], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,3,3], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [4,4,4]),
    ([0,0,-1,0,1,0,-1,0,0], [1,1,3], [4,4,4]),
    ([0,0,1,0,1,0,1,0,0], [1,1,1], [4,4,4]),
    ([1,0,0,0,0,-1,0,-1,0], [1,3,1], [4,4,4]),
    ([1,0,0,0,0,1,0,1,0], [1,1,1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,1,0,-1,0], [3,3,5], [4,4,4]),
    ([-1,0,0,0,0,-1,0,1,0], [3,5,5], [4,4,4]),
    ([0,0,-1,0,-1,0,1,0,0], [3,5,5], [4,4,4]),
    ([0,0,1,0,-1,0,-1,0,0], [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,-1], [3,5,3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [3,3,5], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [3,5,5], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [3,3,3], [4,4,4]),
    ([0,0,-1,0,1,0,-1,0,0], [3,3,5], [4,4,4]),
    ([0,0,1,0,1,0,1,0,0], [3,3,3], [4,4,4]),
    ([1,0,0,0,0,-1,0,-1,0], [3,5,3], [4,4,4]),
    ([1,0,0,0,0,1,0,1,0], [3,3,3], [4,4,4]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(220, 'I -4 3 d', transformations)
space_groups[220] = sg
space_groups['I -4 3 d'] = sg
# Space group 221 (P m -3 m): build its 48 symmetry operations.
# This is the full cubic point group Oh acting at the origin: every
# operation is a pure rotation/rotoinversion with zero translation, so
# only the flattened 3x3 rotation matrices vary from entry to entry.
transformations = []
for _rot in [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,0,-1,0,1,0],
    [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],
    [0,0,-1,0,1,0,1,0,0],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0],
    [-1,0,0,0,0,1,0,1,0],
    [-1,0,0,0,0,-1,0,-1,0],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,0,1,0,-1,0],
    [-1,0,0,0,0,-1,0,1,0],
    [0,0,-1,0,-1,0,1,0,0],
    [0,0,1,0,-1,0,-1,0,0],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [0,0,-1,0,1,0,-1,0,0],
    [0,0,1,0,1,0,1,0,0],
    [1,0,0,0,0,-1,0,-1,0],
    [1,0,0,0,0,1,0,1,0],
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(221, 'P m -3 m', transformations)
space_groups[221] = sg
space_groups['P m -3 m'] = sg
# Space group 222, 'P n -3 n :2' (origin choice 2), 48 symmetry operations.
# Each operation is stored as a (rotation, trans_num, trans_den) triple:
# a 3x3 integer rotation matrix and a fractional translation expressed
# component-wise as trans_num / trans_den.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [0,1,0], [1,2,1]),
    ([1,0,0,0,0,1,0,-1,0], [0,0,1], [1,1,2]),
    ([0,0,1,0,1,0,-1,0,0], [0,0,1], [1,1,2]),
    ([0,0,-1,0,1,0,1,0,0], [1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,0,1,0,-1,0,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,1,0,1,0], [1,0,0], [2,1,1]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,0,-1,0,1,0], [0,0,-1], [1,1,2]),
    ([0,0,-1,0,-1,0,1,0,0], [0,0,-1], [1,1,2]),
    ([0,0,1,0,-1,0,-1,0,0], [-1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [-1,-1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,-1,-1], [1,2,2]),
    ([0,1,0,0,0,-1,1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,1,0,0,0,-1,0], [-1,-1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,1,0], [-1,0,-1], [2,1,2]),
    ([0,-1,0,0,0,1,1,0,0], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,0,-1,0,1,0,-1,0,0], [0,-1,0], [1,2,1]),
    ([0,0,1,0,1,0,1,0,0], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,0,-1,0,-1,0], [-1,0,0], [2,1,1]),
    ([1,0,0,0,0,1,0,1,0], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(222, 'P n -3 n :2', transformations)
space_groups[222] = sg
space_groups['P n -3 n :2'] = sg
# Space group 223, 'P m -3 n', 48 symmetry operations.
# Each operation is stored as a (rotation, trans_num, trans_den) triple:
# a 3x3 integer rotation matrix and a fractional translation expressed
# component-wise as trans_num / trans_den.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,1,0,-1,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,0,-1,0,1,0,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,0,1,0,-1,0,1,0,0], [1,1,1], [2,2,2]),
    ([0,0,-1,0,-1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,1,0,1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,-1,0,-1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [-1,-1,-1], [2,2,2]),
    ([-1,0,0,0,0,-1,0,1,0], [-1,-1,-1], [2,2,2]),
    ([0,0,-1,0,-1,0,1,0,0], [-1,-1,-1], [2,2,2]),
    ([0,0,1,0,-1,0,-1,0,0], [-1,-1,-1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-1,-1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-1], [2,2,2]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,0,-1,0,1,0,-1,0,0], [-1,-1,-1], [2,2,2]),
    ([0,0,1,0,1,0,1,0,0], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,0,-1,0,-1,0], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,0,1,0,1,0], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(223, 'P m -3 n', transformations)
space_groups[223] = sg
space_groups['P m -3 n'] = sg
# Space group 224, 'P n -3 m :2' (origin choice 2), 48 symmetry operations.
# Each operation is stored as a (rotation, trans_num, trans_den) triple:
# a 3x3 integer rotation matrix and a fractional translation expressed
# component-wise as trans_num / trans_den.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,1,0], [1,0,1], [2,1,2]),
    ([1,0,0,0,0,1,0,-1,0], [1,1,0], [2,2,1]),
    ([0,0,1,0,1,0,-1,0,0], [1,1,0], [2,2,1]),
    ([0,0,-1,0,1,0,1,0,0], [0,1,1], [1,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,0,1,0,-1,0,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,0,-1,0,-1,0,0], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,1,0], [0,1,1], [1,2,2]),
    ([-1,0,0,0,0,-1,0,-1,0], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [-1,0,-1], [2,1,2]),
    ([-1,0,0,0,0,-1,0,1,0], [-1,-1,0], [2,2,1]),
    ([0,0,-1,0,-1,0,1,0,0], [-1,-1,0], [2,2,1]),
    ([0,0,1,0,-1,0,-1,0,0], [0,-1,-1], [1,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [-1,-1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,-1,-1], [1,2,2]),
    ([0,1,0,0,0,-1,1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,1,0,0,0,-1,0], [-1,-1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,1,0], [-1,0,-1], [2,1,2]),
    ([0,-1,0,0,0,1,1,0,0], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,-1,0,1,0,-1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,0,1,0,1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,0,-1,0,-1,0], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,0,1,0,1,0], [0,0,0], [1,1,1]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(224, 'P n -3 m :2', transformations)
space_groups[224] = sg
space_groups['P n -3 m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
# Build the SpaceGroup object from the accumulated operation list and register
# it in the module-level `space_groups` mapping under BOTH lookup keys: the
# integer number (225) and the string symbol ('F m -3 m') -- NOTE(review):
# these look like the International Tables number and Hermann-Mauguin symbol;
# confirm against the SpaceGroup class definition (not visible in this chunk).
sg = SpaceGroup(225, 'F m -3 m', transformations)
space_groups[225] = sg
space_groups['F m -3 m'] = sg
# Start a fresh operation list for the next space group. Rebinding (rather
# than clearing) is required: the SpaceGroup above keeps a reference to the
# old list, which must not be mutated afterwards.
transformations = []
# Identity operation (zero translation) is listed first, as for every group.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
# Remaining symmetry operations of space group 226 (F m -3 c), appended to the
# `transformations` list begun above.  Each entry is a flat row-major 3x3
# rotation matrix plus the numerators and denominators of the fractional
# translation (component i translates by num[i]/den[i]).
for _rot_elements, _num, _den in [
    ((0,0,-1,0,-1,0,1,0,0), (-1,0,0), (2,1,1)),
    ((0,0,1,0,-1,0,-1,0,0), (-1,0,0), (2,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,0), (2,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,0,0), (2,1,1)),
    ((0,0,-1,-1,0,0,0,-1,0), (0,0,0), (1,1,1)),
    ((0,-1,0,0,0,-1,-1,0,0), (0,0,0), (1,1,1)),
    ((0,1,0,0,0,1,-1,0,0), (0,0,0), (1,1,1)),
    ((0,0,-1,1,0,0,0,1,0), (0,0,0), (1,1,1)),
    ((0,1,0,0,0,-1,1,0,0), (0,0,0), (1,1,1)),
    ((0,0,1,1,0,0,0,-1,0), (0,0,0), (1,1,1)),
    ((0,0,1,-1,0,0,0,1,0), (0,0,0), (1,1,1)),
    ((0,-1,0,0,0,1,1,0,0), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,0,0), (2,1,1)),
    ((0,1,0,1,0,0,0,0,1), (-1,0,0), (2,1,1)),
    ((0,0,-1,0,1,0,-1,0,0), (-1,0,0), (2,1,1)),
    ((0,0,1,0,1,0,1,0,0), (-1,0,0), (2,1,1)),
    ((1,0,0,0,0,-1,0,-1,0), (-1,0,0), (2,1,1)),
    ((1,0,0,0,0,1,0,1,0), (-1,0,0), (2,1,1)),
    ((1,0,0,0,1,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,0,-1,0,1,0), (1,1,1), (2,2,2)),
    ((1,0,0,0,0,1,0,-1,0), (1,1,1), (2,2,2)),
    ((0,0,1,0,1,0,-1,0,0), (1,1,1), (2,2,2)),
    ((0,0,-1,0,1,0,1,0,0), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,0,1,1,0,0,0,1,0), (0,1,1), (1,2,2)),
    ((0,1,0,0,0,1,1,0,0), (0,1,1), (1,2,2)),
    ((0,-1,0,0,0,-1,1,0,0), (0,1,1), (1,2,2)),
    ((0,0,1,-1,0,0,0,-1,0), (0,1,1), (1,2,2)),
    ((0,-1,0,0,0,1,-1,0,0), (0,1,1), (1,2,2)),
    ((0,0,-1,-1,0,0,0,1,0), (0,1,1), (1,2,2)),
    ((0,0,-1,1,0,0,0,-1,0), (0,1,1), (1,2,2)),
    ((0,1,0,0,0,-1,-1,0,0), (0,1,1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,1,1), (1,2,2)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,0,1,0,-1,0,1,0,0), (1,1,1), (2,2,2)),
    ((0,0,-1,0,-1,0,-1,0,0), (1,1,1), (2,2,2)),
    ((-1,0,0,0,0,1,0,1,0), (1,1,1), (2,2,2)),
    ((-1,0,0,0,0,-1,0,-1,0), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,0,1,0,-1,0), (-1,1,1), (2,2,2)),
    ((-1,0,0,0,0,-1,0,1,0), (-1,1,1), (2,2,2)),
    ((0,0,-1,0,-1,0,1,0,0), (-1,1,1), (2,2,2)),
    ((0,0,1,0,-1,0,-1,0,0), (-1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,1,1), (2,2,2)),
    ((0,0,-1,-1,0,0,0,-1,0), (0,1,1), (1,2,2)),
    ((0,-1,0,0,0,-1,-1,0,0), (0,1,1), (1,2,2)),
    ((0,1,0,0,0,1,-1,0,0), (0,1,1), (1,2,2)),
    ((0,0,-1,1,0,0,0,1,0), (0,1,1), (1,2,2)),
    ((0,1,0,0,0,-1,1,0,0), (0,1,1), (1,2,2)),
    ((0,0,1,1,0,0,0,-1,0), (0,1,1), (1,2,2)),
    ((0,0,1,-1,0,0,0,1,0), (0,1,1), (1,2,2)),
    ((0,-1,0,0,0,1,1,0,0), (0,1,1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1), (-1,1,1), (2,2,2)),
    ((0,0,-1,0,1,0,-1,0,0), (-1,1,1), (2,2,2)),
    ((0,0,1,0,1,0,1,0,0), (-1,1,1), (2,2,2)),
    ((1,0,0,0,0,-1,0,-1,0), (-1,1,1), (2,2,2)),
    ((1,0,0,0,0,1,0,1,0), (-1,1,1), (2,2,2)),
    ((1,0,0,0,1,0,0,0,1), (1,0,1), (2,1,2)),
    ((1,0,0,0,0,-1,0,1,0), (1,0,1), (1,1,2)),
    ((1,0,0,0,0,1,0,-1,0), (1,0,1), (1,1,2)),
    ((0,0,1,0,1,0,-1,0,0), (1,0,1), (1,1,2)),
    ((0,0,-1,0,1,0,1,0,0), (1,0,1), (1,1,2)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1), (1,0,1), (1,1,2)),
    ((0,0,1,1,0,0,0,1,0), (1,0,1), (2,1,2)),
    ((0,1,0,0,0,1,1,0,0), (1,0,1), (2,1,2)),
    ((0,-1,0,0,0,-1,1,0,0), (1,0,1), (2,1,2)),
    ((0,0,1,-1,0,0,0,-1,0), (1,0,1), (2,1,2)),
    ((0,-1,0,0,0,1,-1,0,0), (1,0,1), (2,1,2)),
    ((0,0,-1,-1,0,0,0,1,0), (1,0,1), (2,1,2)),
    ((0,0,-1,1,0,0,0,-1,0), (1,0,1), (2,1,2)),
    ((0,1,0,0,0,-1,-1,0,0), (1,0,1), (2,1,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,0,1), (2,1,2)),
    ((0,1,0,1,0,0,0,0,-1), (1,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,0,1), (1,1,2)),
    ((0,0,1,0,-1,0,1,0,0), (1,0,1), (1,1,2)),
    ((0,0,-1,0,-1,0,-1,0,0), (1,0,1), (1,1,2)),
    ((-1,0,0,0,0,1,0,1,0), (1,0,1), (1,1,2)),
    ((-1,0,0,0,0,-1,0,-1,0), (1,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((-1,0,0,0,0,1,0,-1,0), (0,0,1), (1,1,2)),
    ((-1,0,0,0,0,-1,0,1,0), (0,0,1), (1,1,2)),
    ((0,0,-1,0,-1,0,1,0,0), (0,0,1), (1,1,2)),
    ((0,0,1,0,-1,0,-1,0,0), (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,0,-1,-1,0,0,0,-1,0), (1,0,1), (2,1,2)),
    ((0,-1,0,0,0,-1,-1,0,0), (1,0,1), (2,1,2)),
    ((0,1,0,0,0,1,-1,0,0), (1,0,1), (2,1,2)),
    ((0,0,-1,1,0,0,0,1,0), (1,0,1), (2,1,2)),
    ((0,1,0,0,0,-1,1,0,0), (1,0,1), (2,1,2)),
    ((0,0,1,1,0,0,0,-1,0), (1,0,1), (2,1,2)),
    ((0,0,1,-1,0,0,0,1,0), (1,0,1), (2,1,2)),
    ((0,-1,0,0,0,1,1,0,0), (1,0,1), (2,1,2)),
    ((-1,0,0,0,1,0,0,0,1), (1,0,1), (2,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (1,0,1), (2,1,2)),
    ((1,0,0,0,1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,0,-1,0,1,0,-1,0,0), (0,0,1), (1,1,2)),
    ((0,0,1,0,1,0,1,0,0), (0,0,1), (1,1,2)),
    ((1,0,0,0,0,-1,0,-1,0), (0,0,1), (1,1,2)),
    ((1,0,0,0,0,1,0,1,0), (0,0,1), (1,1,2)),
    ((1,0,0,0,1,0,0,0,1), (1,1,0), (2,2,1)),
    ((1,0,0,0,0,-1,0,1,0), (1,1,0), (1,2,1)),
    ((1,0,0,0,0,1,0,-1,0), (1,1,0), (1,2,1)),
    ((0,0,1,0,1,0,-1,0,0), (1,1,0), (1,2,1)),
    ((0,0,-1,0,1,0,1,0,0), (1,1,0), (1,2,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,1,0), (1,2,1)),
    ((0,1,0,-1,0,0,0,0,1), (1,1,0), (1,2,1)),
    ((0,0,1,1,0,0,0,1,0), (1,1,0), (2,2,1)),
    ((0,1,0,0,0,1,1,0,0), (1,1,0), (2,2,1)),
    ((0,-1,0,0,0,-1,1,0,0), (1,1,0), (2,2,1)),
    ((0,0,1,-1,0,0,0,-1,0), (1,1,0), (2,2,1)),
    ((0,-1,0,0,0,1,-1,0,0), (1,1,0), (2,2,1)),
    ((0,0,-1,-1,0,0,0,1,0), (1,1,0), (2,2,1)),
    ((0,0,-1,1,0,0,0,-1,0), (1,1,0), (2,2,1)),
    ((0,1,0,0,0,-1,-1,0,0), (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,0), (1,2,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,0), (1,2,1)),
    ((0,0,1,0,-1,0,1,0,0), (1,1,0), (1,2,1)),
    ((0,0,-1,0,-1,0,-1,0,0), (1,1,0), (1,2,1)),
    ((-1,0,0,0,0,1,0,1,0), (1,1,0), (1,2,1)),
    ((-1,0,0,0,0,-1,0,-1,0), (1,1,0), (1,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,0,1,0,-1,0), (0,1,0), (1,2,1)),
    ((-1,0,0,0,0,-1,0,1,0), (0,1,0), (1,2,1)),
    ((0,0,-1,0,-1,0,1,0,0), (0,1,0), (1,2,1)),
    ((0,0,1,0,-1,0,-1,0,0), (0,1,0), (1,2,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,1,0), (1,2,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,1,0), (1,2,1)),
    ((0,0,-1,-1,0,0,0,-1,0), (1,1,0), (2,2,1)),
    ((0,-1,0,0,0,-1,-1,0,0), (1,1,0), (2,2,1)),
    ((0,1,0,0,0,1,-1,0,0), (1,1,0), (2,2,1)),
    ((0,0,-1,1,0,0,0,1,0), (1,1,0), (2,2,1)),
    ((0,1,0,0,0,-1,1,0,0), (1,1,0), (2,2,1)),
    ((0,0,1,1,0,0,0,-1,0), (1,1,0), (2,2,1)),
    ((0,0,1,-1,0,0,0,1,0), (1,1,0), (2,2,1)),
    ((0,-1,0,0,0,1,1,0,0), (1,1,0), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((1,0,0,0,1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,1,0), (1,2,1)),
    ((0,1,0,1,0,0,0,0,1), (0,1,0), (1,2,1)),
    ((0,0,-1,0,1,0,-1,0,0), (0,1,0), (1,2,1)),
    ((0,0,1,0,1,0,1,0,0), (0,1,0), (1,2,1)),
    ((1,0,0,0,0,-1,0,-1,0), (0,1,0), (1,2,1)),
    ((1,0,0,0,0,1,0,1,0), (0,1,0), (1,2,1)),
]:
    rot = N.array(_rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
# Register the completed operator list under both the IT number and the
# Hermann-Mauguin symbol.
sg = SpaceGroup(226, 'F m -3 c', transformations)
space_groups[226] = space_groups['F m -3 c'] = sg
# Begin the operator list for the next space group (presumably 227, F d -3 m;
# its SpaceGroup registration lies beyond this chunk — confirm there).  Each
# entry is a flat row-major 3x3 rotation matrix plus the numerators and
# denominators of the fractional translation.
transformations = []
for _rot_elements, _num, _den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,0,-1,0,1,0), (1,0,1), (4,1,4)),
    ((1,0,0,0,0,1,0,-1,0), (1,1,0), (4,4,1)),
    ((0,0,1,0,1,0,-1,0,0), (1,1,0), (4,4,1)),
    ((0,0,-1,0,1,0,1,0,0), (0,1,1), (1,4,4)),
    ((0,-1,0,1,0,0,0,0,1), (0,1,1), (1,4,4)),
    ((0,1,0,-1,0,0,0,0,1), (1,0,1), (4,1,4)),
    ((0,0,1,1,0,0,0,1,0), (0,0,0), (1,1,1)),
    ((0,1,0,0,0,1,1,0,0), (0,0,0), (1,1,1)),
    ((0,-1,0,0,0,-1,1,0,0), (1,1,0), (4,4,1)),
    ((0,0,1,-1,0,0,0,-1,0), (0,1,1), (1,4,4)),
    ((0,-1,0,0,0,1,-1,0,0), (1,0,1), (4,1,4)),
    ((0,0,-1,-1,0,0,0,1,0), (1,1,0), (4,4,1)),
    ((0,0,-1,1,0,0,0,-1,0), (1,0,1), (4,1,4)),
    ((0,1,0,0,0,-1,-1,0,0), (0,1,1), (1,4,4)),
    ((1,0,0,0,-1,0,0,0,-1), (0,1,1), (1,4,4)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,1), (4,1,4)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (4,4,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,0), (4,4,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,0,1,0,-1,0,1,0,0), (1,0,1), (4,1,4)),
    ((0,0,-1,0,-1,0,-1,0,0), (0,0,0), (1,1,1)),
    ((-1,0,0,0,0,1,0,1,0), (0,1,1), (1,4,4)),
    ((-1,0,0,0,0,-1,0,-1,0), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,0,1,0,-1,0), (-1,0,-1), (4,1,4)),
    ((-1,0,0,0,0,-1,0,1,0), (-1,-1,0), (4,4,1)),
    ((0,0,-1,0,-1,0,1,0,0), (-1,-1,0), (4,4,1)),
    ((0,0,1,0,-1,0,-1,0,0), (0,-1,-1), (1,4,4)),
    ((0,1,0,-1,0,0,0,0,-1), (0,-1,-1), (1,4,4)),
    ((0,-1,0,1,0,0,0,0,-1), (-1,0,-1), (4,1,4)),
    ((0,0,-1,-1,0,0,0,-1,0), (0,0,0), (1,1,1)),
    ((0,-1,0,0,0,-1,-1,0,0), (0,0,0), (1,1,1)),
    ((0,1,0,0,0,1,-1,0,0), (-1,-1,0), (4,4,1)),
    ((0,0,-1,1,0,0,0,1,0), (0,-1,-1), (1,4,4)),
    ((0,1,0,0,0,-1,1,0,0), (-1,0,-1), (4,1,4)),
    ((0,0,1,1,0,0,0,-1,0), (-1,-1,0), (4,4,1)),
    ((0,0,1,-1,0,0,0,1,0), (-1,0,-1), (4,1,4)),
    ((0,-1,0,0,0,1,1,0,0), (0,-1,-1), (1,4,4)),
    ((-1,0,0,0,1,0,0,0,1), (0,-1,-1), (1,4,4)),
    ((1,0,0,0,-1,0,0,0,1), (-1,0,-1), (4,1,4)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (4,4,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-1,0), (4,4,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,0,-1,0,1,0,-1,0,0), (-1,0,-1), (4,1,4)),
    ((0,0,1,0,1,0,1,0,0), (0,0,0), (1,1,1)),
    ((1,0,0,0,0,-1,0,-1,0), (0,-1,-1), (1,4,4)),
    ((1,0,0,0,0,1,0,1,0), (0,0,0), (1,1,1)),
    ((1,0,0,0,1,0,0,0,1), (0,1,1), (1,2,2)),
    ((1,0,0,0,0,-1,0,1,0), (1,1,3), (4,2,4)),
    ((1,0,0,0,0,1,0,-1,0), (1,3,1), (4,4,2)),
]:
    rot = N.array(_rot_elements)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
# Register space group No. 227 ('F d -3 m', origin choice 2) under both its
# International Tables number and its Hermann-Mauguin symbol, so lookups by
# either key return the same SpaceGroup object. Then start a fresh operation
# list for the next group.
sg = SpaceGroup(227, 'F d -3 m :2', transformations)
space_groups[227] = sg
space_groups['F d -3 m :2'] = sg
transformations = []
# First operation of the next space group: the identity (unit rotation,
# zero translation) — by convention the generated operation lists start
# with the identity element.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,5,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,5,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,5])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,3,5])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,5,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,5])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,5])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,0,5])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,5])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = | N.array([0,-1,0,1,0,0,0,0,-1]) | numpy.array |
import numpy as np
class Agent(object):
    """Action-value bandit agent over ``k`` arms.

    Keeps an incremental estimate of each arm's mean reward.  For the first
    ``init_exploration`` steps it pulls arms uniformly at random; afterwards
    it delegates arm selection to ``policy.choose(self)``.
    """

    def __init__(self, k, policy, init_exploration, prior=0, gamma=None):
        self.policy = policy
        self.k = k
        self.prior = prior
        # gamma is None -> sample-average step size 1/n; otherwise a constant
        # step size (recency-weighted average).
        self.gamma = gamma
        # np.full avoids the throwaway prior*np.ones(k) allocation.
        self._value_estimates = np.full(self.k, prior, dtype=float)  # estimated mean reward per arm
        self.action_attempts = np.zeros(self.k)  # pull count per arm
        self.t = 0                # total rewards observed so far
        self.last_action = None   # arm chosen by the most recent choose()
        self.init_exploration = init_exploration

    def reset(self):
        """Reset the agent's memory to its initial state (in place)."""
        self._value_estimates.fill(self.prior)
        self.action_attempts.fill(0)
        self.last_action = None
        self.t = 0

    def choose(self):
        """Return the arm to pull: uniform while warming up, else the policy's pick."""
        if self.t < self.init_exploration:
            action = np.random.randint(self.k)
        else:
            action = self.policy.choose(self)
        self.last_action = action
        return action

    def observe(self, reward):
        """Fold ``reward`` into the estimate of the last pulled arm.

        Incremental update Q <- Q + g * (reward - Q), where g is 1/n for the
        sample-average case (``gamma`` is None) or the constant ``gamma``.
        """
        self.action_attempts[self.last_action] += 1
        if self.gamma is None:
            g = 1 / self.action_attempts[self.last_action]
        else:
            g = self.gamma
        q = self._value_estimates[self.last_action]
        self._value_estimates[self.last_action] += g * (reward - q)
        self.t += 1

    @property
    def value_estimates(self):
        return self._value_estimates
class ContextualAgent(Agent):
"""
( linUCB disjoint model)
"""
def __init__(self, k, d, policy, init_exploration, prior=0, gamma=None):
super().__init__(k, policy, init_exploration, prior, gamma)
self.d = d
self.memory = {action: {'A': np.identity(self.d),
'b': np.zeros((self.d, 1))} for action in range(self.k)}
self.states = np.array([])
self.reset()
def reset(self):
self._value_estimates[:] = self.prior * np.ones(self.k)
self.action_attempts[:] = 0
self.last_action = None
self.t = 0
self.memory = {action: {'A': | np.identity(self.d) | numpy.identity |
# =============================================================================
# Final Code for N Body
# =============================================================================
#Importing libraries
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.integrate import solve_ivp
from matplotlib import animation
from mpl_toolkits.mplot3d import Axes3D
from scipy.integrate import LSODA #not directly used, but can be used to compare how fast LSODA solves compared to RK methods
#%%
# Physical constants and initial conditions for the Earth-Sun two-body run.
# All positions in metres, velocities in m/s, masses in kg, times in seconds.
G = 6.67430e-11 #Gravitational constant (m^3 kg^-1 s^-2)
## Sun initial conditions ##
x_sun_inital=0 #Sun x coord
y_sun_inital=0 #Sun y coord
z_sun_inital=0 #Sun z coord
vx_sun_inital=0 #Sun velocity in x-direction
vy_sun_inital=0 #Sun velocity in y-direction
vz_sun_inital=0 #Sun velocity in z-direction
M_s=1.989e30 #Sun mass in kg
## Earth initial conditions ##
x_earth_inital= 1.496*10**11 #Earth x coord - 1 AU initially (metres)
y_earth_inital=0 #Earth y coord
z_earth_inital=0 #Earth z coord
vx_earth_inital=0 #Earth velocity in x-direction
vy_earth_inital=np.sqrt((G*M_s)/x_earth_inital) #Circular-orbit speed sqrt(G*M_s/r) in y-direction
vz_earth_inital=0 #Earth velocity in z-direction
M_e=5.972*10**24 #Earth mass in kg
## Time the System evolves over ##
year = 3.154*10**7 #Year in seconds
ti=0 #Initial time
tf=5*year #Solves up to 5 years
t=np.arange(ti,tf,10) #Sample times, one every 10 seconds
#Defining 2D system of Earth and Sun
def solving_system_earth(System_Earth,t):
    """Derivative of the planar Earth-Sun state, in the form odeint expects.

    State layout: [x_earth, y_earth, x_sun, y_sun,
                   vx_earth, vy_earth, vx_sun, vy_sun].
    Uses the module-level constants G, M_s and M_e.
    """
    x_e, y_e, x_s, y_s, vx_e, vy_e, vx_s, vy_s = System_Earth
    # Separation vector from Earth to Sun.
    dx = x_s - x_e
    dy = y_s - y_e
    r_se = np.sqrt(dx**2 + dy**2)
    accel_earth = G*M_s/r_se**3   # Earth pulled towards the Sun
    accel_sun = G*M_e/r_se**3     # Sun pulled towards the Earth
    return [vx_e,
            vy_e,
            vx_s,
            vy_s,
            accel_earth * dx,
            accel_earth * dy,
            -accel_sun * dx,
            -accel_sun * dy]
#Solving 2D System of Earth and Sun with odeint (state in metres / m/s)
Solution_2D_Earth = odeint(solving_system_earth, y0=[x_earth_inital, y_earth_inital, x_sun_inital, y_sun_inital,
                                  vx_earth_inital,vy_earth_inital,
                                  vx_sun_inital,vy_sun_inital],
                 t=t)
Solution_2D_Earth = Solution_2D_Earth/1.496e11 #Converting solution into AU
t1=Solution_2D_Earth.T[0] #NOTE(review): this is the Earth x-coordinate column (AU), not the time -- confirm intent
#%%
# Plotting distance from sun against time (test plot)
# NOTE(review): the curve is plotted against sample index (one sample per
# 10 s from np.arange above), not seconds -- the "Time (Seconds)" label and
# the [0..5] year ticks below assume that mapping; verify before reuse.
fig1=plt.figure(1,figsize=(10,10))
axsec=plt.gca() #gets current axis
axsec.plot((Solution_2D_Earth.T[0])) #Earth x-coordinate in AU
axsec.tick_params(labelsize=15) #Increasing tick size
plt.xlabel("Time (Seconds)",fontsize=18)
plt.ylabel("Distance from the Sun in AU",fontsize=18)
plt.title("$x$⨁ against time over 5 years",fontsize=24,x=0.5,y=1.1)
#Adding secondary x-axis labelled in years
axyears=axsec.twiny()
axyears.set_xticks([0,1,2,3,4,5])
axyears.set_xlabel("Time (Years)",fontsize=18)
axyears.tick_params(labelsize=15) #making ticks readable size
plt.show()
#%%
# Plotting full orbit view (test plot 2)
fig2=plt.figure(2,figsize=(12,12))
# Columns of the AU-scaled odeint solution: Earth x,y then Sun x,y.
x_earth_sol= Solution_2D_Earth[:,0] #x coord of Earth
y_earth_sol= Solution_2D_Earth[:,1] #y coord of Earth
x_sun_sol= Solution_2D_Earth[:,2] #x coord of the Sun
y_sun_sol= Solution_2D_Earth[:,3] #y coord of the Sun
plt.plot(x_earth_sol,y_earth_sol,'b') #Plotting Earth's orbit
plt.plot(x_sun_sol,y_sun_sol,'orange',linewidth=5) #Plotting the Sun's orbit
plt.title("Earth's Orbit around the Sun",fontsize=24)
plt.xlabel('$x$' r'$\bigoplus$',fontsize=18)
plt.ylabel('$y$' r'$\bigoplus$',fontsize=18)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.show()
#%%
## 3D Plotting of Earth around the Sun
# The problem was solved in 2D, so both curves are drawn in the z=0 plane.
fig3= plt.figure(3,figsize=(10,10))
ax3=plt.axes(projection='3d') #3d axis setup
plt.plot(x_earth_sol,y_earth_sol,0,linewidth=5) #Plotting Earth Sun orbit with no z components.
plt.plot(x_sun_sol,y_sun_sol,0,linewidth=5)
plt.title("Earth Orbit around Sun 3D Axis",fontsize=20)
plt.xlabel('$x$' r'$\bigoplus$',fontsize=16)
plt.ylabel('$y$' r'$\bigoplus$',fontsize=16)
ax3.set_zlabel('$z$' r'$\bigoplus$',fontsize=16)
ax3.locator_params(nbins=6) #6 ticks on each axis for no overlapping
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
ax3.zaxis.set_tick_params(labelsize=14)
ax3.set_aspect('auto') #auto selects best aspect ratio to display
plt.show()
#%%
## Attempting with Mars ##
## Mars Initial Conditions (positions in metres, velocities in m/s) ##
x_mars_inital= 1.5*1.496e11 #x coord of Mars: 1.5 AU, expressed in metres
y_mars_inital=0 #y coord of Mars
z_mars_inital=0 #z coord of Mars
vx_mars_inital= 0 #Velocity of Mars in x component
vy_mars_inital= np.sqrt((G*M_s)/x_mars_inital) #Circular-orbit speed sqrt(G*M_s/r) in y component
vz_mars_inital=0 #Velocity of Mars in z component
M_m= 6.39e23 #Mars's mass in kg
##Defining Mars Sun Problem ##
def evolving_system_mars(System_Mars,t):
    """ODE right-hand side for the planar Mars-Sun two-body problem (odeint form).

    State layout: [x_mars, y_mars, x_sun, y_sun, vx_mars, vy_mars, vx_sun, vy_sun].
    Uses the module-level constants G, M_s and M_m.

    Bug fix: Mars' acceleration comes from the SUN's mass (G*M_s/r^3); the
    original used M_m in all four acceleration terms, making Mars'
    acceleration about seven orders of magnitude too small.  The corrected
    redefinition of this function further down the file already uses M_s;
    this fix makes the first definition consistent with it.
    """
    x_mars,y_mars,x_sun,y_sun,vx_mars,vy_mars,vx_sun,vy_sun = System_Mars
    # Sun-Mars separation (planar problem, z ignored)
    r_ms= np.sqrt((x_sun-x_mars)**2 +(y_sun-y_mars)**2)
    return [vx_mars,
            vy_mars,
            vx_sun,
            vy_sun,
            (G*M_s/r_ms**3)*(x_sun-x_mars),   # Mars acceleration: pulled by the Sun
            (G*M_s/r_ms**3) *(y_sun-y_mars),
            (G*M_m/r_ms**3) * (x_mars-x_sun), # Sun acceleration: pulled by Mars
            (G*M_m/r_ms**3) *(y_mars-y_sun)]
#Solving Mars Sun problem
# NOTE(review): Solution_Mars, x_mars_sol and y_mars_sol are all recomputed
# below after evolving_system_mars is redefined; the values set here are
# overwritten and never used.
Solution_Mars = odeint(evolving_system_mars, y0=[x_mars_inital, y_mars_inital ,
                                  x_sun_inital,y_sun_inital,
                                  vx_mars_inital,vy_mars_inital,
                                  vx_sun_inital,vy_sun_inital,],
                 t=t)
Solution_Mars = Solution_Mars/1.496e11 #Converting solution into AU
x_mars_sol= Solution_Mars[:,0] #x coord of Mars
y_mars_sol= Solution_Mars[:,1] #y coord of Mars
#Solving Mars 2D system
def evolving_system_mars(System_Mars,t):
    """Derivative of the planar Mars-Sun state, in the form odeint expects.

    State layout: [x_mars, y_mars, x_sun, y_sun,
                   vx_mars, vy_mars, vx_sun, vy_sun].
    Uses the module-level constants G, M_s and M_m.
    """
    x_m, y_m, x_s, y_s, vx_m, vy_m, vx_s, vy_s = System_Mars
    # Separation vector from Mars to the Sun.
    dx = x_s - x_m
    dy = y_s - y_m
    rm=np.sqrt(dx**2 +dy**2)
    pull_on_mars = G*M_s/rm**3  # Mars accelerated by the Sun
    pull_on_sun = G*M_m/rm**3   # Sun accelerated by Mars
    return [vx_m,
            vy_m,
            vx_s,
            vy_s,
            pull_on_mars * dx,
            pull_on_mars * dy,
            -pull_on_sun * dx,
            -pull_on_sun * dy]
# Solve the corrected Mars-Sun system over the same time grid.
Solution_2D_Mars = odeint(evolving_system_mars, y0=[x_mars_inital, y_mars_inital ,
                                  x_sun_inital,y_sun_inital,
                                  vx_mars_inital,vy_mars_inital,
                                  vx_sun_inital,vy_sun_inital],
                 t=t)
Solution_2D_Mars = Solution_2D_Mars/1.496e11 #Converting solution into AU
x_mars_sol= Solution_2D_Mars[:,0] #x coord of Mars
y_mars_sol= Solution_2D_Mars[:,1] #y coord of Mars
x_sun_sol= Solution_2D_Mars[:,2] #x coord of the Sun
y_sun_sol= Solution_2D_Mars[:,3] #y coord of the Sun
## 3D Plotting of Earth, Mars, Sun orbit.
# All three orbits are planar, so they are drawn in the z=0 plane.
fig4= plt.figure(4,figsize=(10,10))
ax4=plt.axes(projection='3d')
plt.plot(x_mars_sol,y_mars_sol,0,label="Mars Orbit",color='Red') #plots x,y coords of mars
plt.title("Earth and Mars Orbit 3D",fontsize=20)
plt.plot(x_earth_sol,y_earth_sol,color='blue',label="Earth Orbit")
plt.plot(x_sun_sol,y_sun_sol,0,label="Sun Orbit",color='orange',linewidth=4) #Plotting the Sun's path with no z components.
plt.xlabel('$x$' r'$\bigoplus$',fontsize=16)
plt.ylabel('$y$' r'$\bigoplus$',fontsize=16)
ax4.set_zlabel('$z$' r'$\bigoplus$',fontsize=16)
plt.show()
#%%
# =============================================================================
# 2 Heavy Stars and 1 Smaller Mass
# =============================================================================
# Initial conditions for the three-body run: positions in metres, masses in
# kg, velocities in m/s.  The initial speeds are rough circular-orbit
# estimates, sqrt(G*M_other/r), summed over the two other bodies.
# Initial masses
M_e=5.972e24
M_Star1=1e50
M_Star2=1e35
M_Planet=1e20
G=6.6743e-11
# Initial positions
x_star1_inital = 1e10
y_star1_inital = 0
z_star1_inital = 0
x_star2_inital=2e10
y_star2_inital = 1e10
z_star2_inital =0
x_planet_inital =-2e10
y_planet_inital =-2e10
z_planet_inital = 0
# Initial body-to-body separations.
# Bug fix: r_s1_p3 (star1-planet) previously used star2's position and
# r_s2_p3 (star2-planet) used star1's -- the two were swapped relative to
# their names.  NOTE(review): these separations still ignore the y offsets
# of star2 and the planet; confirm that simplification is intended.
r_s1_s2= np.sqrt((x_star2_inital-x_star1_inital)**2)
r_s1_p3= np.sqrt((x_planet_inital-x_star1_inital)**2)
r_s2_p3 = np.sqrt((x_planet_inital-x_star2_inital)**2)
# Initial velocities (circular-orbit speed estimates)
vx_star1_inital =0
vy_star1_inital = np.sqrt(G*M_Star2/np.abs(r_s1_s2))+np.sqrt(G*M_Planet/np.abs(r_s1_p3))
vz_star1_inital = 0
vx_star2_inital = 0
vy_star2_inital = np.sqrt(G*M_Star1/np.abs(r_s1_s2))+np.sqrt(G*M_Planet/np.abs(r_s2_p3))
vz_star2_inital=0
vx_planet_inital = 0
vy_planet_inital = np.sqrt(G*M_Star1/np.abs(r_s1_p3))+np.sqrt(G*M_Star2/np.abs(r_s2_p3))
vz_planet_inital = 0
#Defining three body systems with 2 stars, 1 planet
def three_body_2stars(t, System_2stars):
    """Right-hand side for solve_ivp: two stars and one planet under mutual gravity.

    State layout (18 values): positions (x, y, z) of star1, star2 and the
    planet, followed by their velocities in the same order.  Returns the time
    derivative of the state.  Uses the module-level G and masses.

    Bug fix: in every acceleration component the second attracting body's
    term had the form ``M_other/r**3 * ...`` without the gravitational
    constant G (only the first term of each sum carried it), overstating that
    contribution by a factor of 1/G.  All terms now include G.
    """
    x_star1,y_star1,z_star1,x_star2,y_star2,z_star2,x_planet,y_planet,z_planet,vx_star1,vy_star1,vz_star1,vx_star2, vy_star2,vz_star2,vx_planet,vy_planet,vz_planet = System_2stars
    # Pairwise separations (full 3D distances)
    r_s1_s2 = np.sqrt((x_star2-x_star1)**2 + (y_star2-y_star1)**2 + (z_star2-z_star1)**2)
    r_s1_p3 = np.sqrt((x_planet-x_star1)**2 + (y_planet-y_star1)**2 +(z_planet-z_star1)**2)
    r_s2_p3 = np.sqrt((x_star2-x_planet)**2 + (y_star2-y_planet)**2 + (z_star2-z_planet)**2)
    return [ vx_star1,
            vy_star1,
            vz_star1,
            vx_star2,
            vy_star2,
            vz_star2,
            vx_planet,
            vy_planet,
            vz_planet,
            G*M_Star2/r_s1_s2**3 * (x_star2-x_star1) + G*M_Planet/r_s1_p3**3 * (x_planet-x_star1), #Star1 acceleration
            G*M_Star2/r_s1_s2**3 * (y_star2-y_star1) + G*M_Planet/r_s1_p3**3 * (y_planet-y_star1),
            G*M_Star2/r_s1_s2**3 * (z_star2-z_star1) + G*M_Planet/r_s1_p3**3 *(z_planet-z_star1),
            G*M_Star1/r_s1_s2**3 * (x_star1-x_star2) + G*M_Planet/r_s2_p3**3 * (x_planet-x_star2), #Star2 acceleration
            G*M_Star1/r_s1_s2**3 * (y_star1-y_star2) + G*M_Planet/r_s2_p3**3 * (y_planet-y_star2),
            G*M_Star1/r_s1_s2**3 * (z_star1-z_star2) + G*M_Planet/r_s2_p3**3 * (z_planet-z_star2),
            G*M_Star1/r_s1_p3**3 * (x_star1-x_planet) + G*M_Star2/r_s2_p3**3 * (x_star2-x_planet), #Planet acceleration
            G*M_Star1/r_s1_p3**3 * (y_star1-y_planet) + G*M_Star2/r_s2_p3**3 * (y_star2-y_planet),
            G*M_Star1/r_s1_p3**3 *(z_star1-z_planet) + G*M_Star2/r_s2_p3**3 *(z_star2-z_planet)]
#time system runs over
t_min=0
t_max=1000
# NOTE(review): this linspace grid is never passed to solve_ivp (no t_eval),
# and ``t`` is overwritten with the solver's own time points below.
t = np.linspace(t_min, t_max, 100000)
#Solving three body system of 2 stars, 1 planet with an explicit RK23 solver
Solution_3_Body_2_Stars= solve_ivp(three_body_2stars,y0=[x_star1_inital,
                                                         y_star1_inital,
                                                         z_star1_inital,
                                    x_star2_inital, y_star2_inital ,
                                    z_star2_inital,
                                    x_planet_inital, y_planet_inital,
                                    z_planet_inital,
                                    vx_star1_inital, vy_star1_inital,vz_star1_inital,
                                    vx_star2_inital, vy_star2_inital,vz_star2_inital,
                                    vx_planet_inital, vy_planet_inital,vz_planet_inital],
                                    method='RK23', t_span=(0,1000))
#coordinates of each object over time (rows of the solve_ivp solution)
x_star1_sol = Solution_3_Body_2_Stars.y[0]
y_star1_sol = Solution_3_Body_2_Stars.y[1]
z_star1_sol = Solution_3_Body_2_Stars.y[2]
x_star2_sol = Solution_3_Body_2_Stars.y[3]
y_star2_sol = Solution_3_Body_2_Stars.y[4]
z_star2_sol = Solution_3_Body_2_Stars.y[5]
x_planet_sol = Solution_3_Body_2_Stars.y[6]
y_planet_sol = Solution_3_Body_2_Stars.y[7]
z_planet_sol = Solution_3_Body_2_Stars.y[8]
t = Solution_3_Body_2_Stars.t
#Animates the three body system by plotting positions to line objects
def animate_2stars_1planet(i):
    """FuncAnimation callback: move each body's marker to its frame-``i`` position.

    Bug fix: line2 and line3 were given a single ``[x, y]`` list, which
    Line2D.set_data unpacks as scalar x and y data -- inconsistent with line1
    and rejected by newer matplotlib versions.  All three now pass separate
    one-element x and y sequences.
    """
    line1.set_data([x_star1_sol[i]], [y_star1_sol[i]])
    line2.set_data([x_star2_sol[i]], [y_star2_sol[i]])
    line3.set_data([x_planet_sol[i]], [y_planet_sol[i]])
# Figure and empty line artists for the animation; the callback above fills
# them in frame by frame.
fig5=plt.figure(figsize=(12,12))
ax5=plt.axes()
ax5.set_facecolor('black') #background black for space theme
plt.grid() #adds grid to plot background
#Plotting positions (empty artists, data set per frame)
line1, = plt.plot([], [],'r*', lw=3, markersize=20,label="Star1")
line2, =plt.plot([],[],'b*',lw=3,label="Star2",markersize=20)
line3, = plt.plot([],[],'go',label="Planet",markersize=10)
#Axis labelling
plt.xlabel("$x$(metres)",fontsize=18)
plt.ylabel("$y$(metres)",fontsize=18)
plt.xlim(-10e10,10e10)
plt.ylim(-10e10,10e10)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.legend()
plt.title("2 Stars and a Planet Orbit",fontsize=22)
#blit false for three body systems
ani1 = animation.FuncAnimation(fig5, animate_2stars_1planet,
                               frames=1000, interval=1,blit=False)
plt.show()
#%%
#3D plotting coordinates over time for 2 stars, one planet
# NOTE(review): the axis labels claim units of 10^10 metres for star 1 but
# 10^13 for star 2 and the planet -- the data are plain metres in all three
# panels; confirm which scale the labels should state.
fig9=plt.figure(figsize=(22,14))
plt.axis('off')
plt.title("Coordinates Plotted in 3D over Time",fontsize=26)
#Setting up 3 subplots with 3D axes
ax9=fig9.add_subplot(1,3,1,projection='3d')
ax10=fig9.add_subplot(1,3,2,projection='3d')
ax11=fig9.add_subplot(1,3,3,projection='3d')
plt.subplots_adjust(hspace=0,wspace=0.3,left=0,right=None)
#Plotting star 1 coords
#labelpad used so axes ticks and axes labels do not overlap
ax9.plot(x_star1_sol,y_star1_sol,z_star1_sol,color='r')
ax9.set_xlabel(" X Coordinate (10^10 metres)",fontsize=18,labelpad=30)
ax9.set_ylabel(" Y Coordinate (10^10 metres)",fontsize=18,labelpad=30)
ax9.set_zlabel("Z Coordinate (metres)",fontsize=18,labelpad=30)
ax9.set_title("Coordinates of Star 1",fontsize=22)
ax9.tick_params(axis='both',labelsize=16,pad=10)
#Plotting star 2 coords
ax10.plot(x_star2_sol,y_star2_sol,z_star2_sol,color='b')
ax10.set_xlabel(" X Coordinate (10^13 metres)",fontsize=18,labelpad=30)
ax10.set_ylabel(" Y Coordinate (10^13 metres)",fontsize=18,labelpad=30)
ax10.set_zlabel("Z Coordinate (metres)",fontsize=18,labelpad=30)
ax10.set_title("Coordinates of Star 2",fontsize=22)
ax10.tick_params(axis='both',labelsize=14,pad=10)
#Plotting planet coords
ax11.plot(x_planet_sol,y_planet_sol,z_planet_sol,color='g')
ax11.tick_params(axis='both',labelsize=14,pad=10)
ax11.set_xlabel(" X Coordinate (10^13 metres)",fontsize=18,labelpad=30)
ax11.set_ylabel(" Y Coordinate (10^13 metres)",fontsize=18,labelpad=30)
ax11.set_zlabel("Z Coordinate (metres)",fontsize=18,labelpad=30)
ax11.set_title("Coordinates of Planet",fontsize=22)
plt.show()
#%%
# =============================================================================
# 2 Planets and 1 Star
# =============================================================================
#Setting inital conditions
#masses inital conditions
M_Star1=1e50
M_Planet1=1e20
M_Planet2=1e20
G=6.6743e-11
#positions inital conditions
x_star_inital = 1e10
y_star_inital = 0
z_star_inital = 0
x_planet1_inital=10e10
y_planet1_inital = 10e10
z_planet1_inital =0
x_planet2_inital =-10e10
y_planet2_inital =-10e10
z_planet2_inital = 0
#inital radius vectors
r_p1_s= np.sqrt((x_planet1_inital-x_star_inital)**2)
r_p2_p1= np.sqrt((x_planet2_inital-x_planet1_inital)**2)
r_p2_s = np.sqrt((x_planet2_inital-x_star_inital)**2)
#inital velocities
vx_star_inital =0
vy_star_inital = np.sqrt(G*M_Planet2/np.abs(r_p1_s))+np.sqrt(G*M_Planet1/np.abs(r_p2_p1))
vz_star_inital = 0
vx_planet1_inital = 0
vy_planet1_inital = np.sqrt(G*M_Star1/np.abs(r_p1_s))+np.sqrt(G*M_Planet1/ | np.abs(r_p2_s) | numpy.abs |
""" Packaged MASAC"""
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from unityagents import UnityEnvironment
from buffers.buffer import ReplayBuffer
from models.network import Network
from torch.nn.utils.clip_grad import clip_grad_norm_
class DQNAgent:
    def __init__(
        self,
        env: UnityEnvironment,
        memory_size: int,
        batch_size: int,
        target_update: int,
        epsilon_decay: float = 1 / 2000,
        max_epsilon: float = 1.0,
        min_epsilon: float = 0.1,
        gamma: float = 0.99,
    ):
        """Wire the agent to a Unity environment and build online/target networks.

        Args:
            env: running UnityEnvironment; the first brain is used.
            memory_size: replay-buffer capacity.
            batch_size: mini-batch size sampled from the replay buffer.
            target_update: number of gradient updates between target-network syncs.
            epsilon_decay: fraction of the (max-min) epsilon range removed per decay step.
            max_epsilon: starting exploration rate.
            min_epsilon: floor for the exploration rate.
            gamma: discount factor.
        """
        self.brain_name = env.brain_names[0]
        self.brain = env.brains[self.brain_name]
        # Reset once in training mode to read the observation layout.
        env_info = env.reset(train_mode=True)[self.brain_name]
        self.env = env
        action_size = self.brain.vector_action_space_size
        state = env_info.vector_observations[0]
        state_size = len(state)
        self.obs_dim = state_size
        # NOTE(review): action_dim is hard-coded to 1 even though action_size
        # is read from the brain above -- confirm this is intentional.
        self.action_dim = 1
        self.memory = ReplayBuffer(self.obs_dim, self.action_dim, memory_size, batch_size)
        self.batch_size = batch_size
        self.target_update = target_update
        self.epsilon_decay = epsilon_decay
        self.max_epsilon = max_epsilon
        self.min_epsilon = min_epsilon
        self.gamma = gamma
        self.epsilon = max_epsilon
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Online and target networks; the target starts as a frozen copy.
        self.dqn = Network(self.obs_dim, self.action_dim)
        self.dqn_target = Network(self.obs_dim, self.action_dim)
        self.dqn_target.load_state_dict(self.dqn.state_dict())
        self.dqn_target.eval()
        self.optimizer = optim.Adam(self.dqn.parameters(), lr=5e-5)
        # Holds [state, action, reward, next_state, done] while a step completes.
        self.transition = list()
        self.is_test = False
def select_action(self, state: np.ndarray) -> np.int64:
""" Select an action given input """
if self.epsilon > np.random.random():
selected_action = np.random.random_integers(0, self.action_dim-1)
else:
selected_action = self.dqn(
torch.FloatTensor(state).to(self.device)
)
selected_action = np.argmax(selected_action.detach().cpu().numpy())
if not self.is_test:
self.transition = [state, selected_action]
return selected_action
    def step(self, action: np.int64) -> Tuple[np.ndarray, np.float64, bool]:
        """Apply ``action`` to the environment and observe the outcome.

        Outside test mode the pending (state, action) pair staged by
        ``select_action`` is completed with (reward, next_state, done) and
        stored in the replay buffer.

        Args:
            action: action index to execute.

        Returns:
            Tuple of (next_state, reward, done).
        """
        env_info = self.env.step(action)[self.brain_name]
        next_state = env_info.vector_observations[0]
        reward = env_info.rewards[0]
        done = env_info.local_done[0]
        if not self.is_test:
            # Complete the transition started in select_action and persist it.
            self.transition += [reward, next_state, done]
            self.memory.store(*self.transition)
        return next_state, reward, done
def update_model(self) -> torch.Tensor:
""" Update model by gradient descent"""
samples = self.memory.sample_batch()
loss = self._compute_dqn_loss(samples)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.item()
    def train(self, num_episode: int, max_iteration: int=1000, plotting_interval: int=400):
        """Train the agent with epsilon-greedy exploration.

        Args:
            num_episode: number of episodes to run.
            max_iteration: hard cap on steps per episode.
            plotting_interval: unused in this body; kept for interface
                stability.
        """
        self.is_test = False
        env_info = self.env.reset(train_mode=True)[self.brain_name]
        state = env_info.vector_observations[0]
        update_cnt = 0
        epsilons = []
        losses = []
        avg_losses= []
        scores = []
        avg_scores = []
        for episode in range(num_episode):
            env_info = self.env.reset(train_mode=True)[self.brain_name]
            state = env_info.vector_observations[0]
            score = 0
            # NOTE(review): loop variable ``iter`` shadows the builtin.
            for iter in range(max_iteration):
                action = self.select_action(state)
                next_state, reward, done = self.step(action)
                state = next_state
                score += reward
                if done:
                    break
                # Learn once the buffer holds at least one full batch.
                if len(self.memory) > self.batch_size:
                    loss = self.update_model()
                    losses.append(loss)
                    update_cnt += 1
                    # Average of losses accumulated since the last flush,
                    # then reset the accumulator.
                    avg_losses.append(np.mean(losses))
                    losses = []
                    # Linearly anneal epsilon towards its floor.
                    self.epsilon = max(
                        self.min_epsilon, self.epsilon - (
                            self.max_epsilon - self.min_epsilon
                        ) * self.epsilon_decay
                    )
                    # NOTE(review): epsilon is appended here per update AND
                    # again per episode below -- likely only one is intended.
                    epsilons.append(self.epsilon)
                    # Periodically sync the target network.
                    if update_cnt % self.target_update == 0:
                        self._target_hard_update()
            scores.append(score)
            epsilons.append(self.epsilon)
            # Rolling 100-episode average, available after a warm-up period.
            if episode >= 100:
                avg_scores.append(np.mean(scores[-100:]))
            self._plot(episode, scores, avg_scores, avg_losses, epsilons)
        torch.save(self.dqn.state_dict(), "model_weight/dqn.pt")
    def test(self):
        """Run a single greedy evaluation episode, print its score, close env."""
        self.is_test = True
        # train_mode=False runs the environment at normal (rendered) speed.
        env_info = self.env.reset(train_mode=False)[self.brain_name]
        state = env_info.vector_observations[0]
        done = False
        score = 0
        while not done:
            action = self.select_action(state)
            next_state, reward, done = self.step(action)
            state = next_state
            score += reward
        print("score: ", score)
        self.env.close()
def _compute_dqn_loss(self, samples: Dict[str, np.ndarray], gamma: float=0.99) -> torch.Tensor:
""" Compute and return DQN loss"""
gamma = self.gamma
device = self.device
state = torch.FloatTensor(samples["obs"]).to(device)
next_state = torch.FloatTensor(samples["next_obs"]).to(device)
action = torch.LongTensor(samples["acts"]).reshape(-1, 1).to(device)
reward = torch.FloatTensor(samples["rews"]).reshape(-1, 1).to(device)
done = torch.FloatTensor(samples["done"]).reshape(-1, 1).to(device)
curr_q_value = self.dqn(state).gather(1, action)
next_q_value = self.dqn_target(next_state).max(dim=1, keepdim=True)[0].detach()
mask = 1 - done
target = (reward + gamma * next_q_value * mask).to(device)
loss = F.smooth_l1_loss(curr_q_value, target)
return loss
    def _target_hard_update(self):
        """Copy the online network's weights into the target network."""
        self.dqn_target.load_state_dict(self.dqn.state_dict())
def _plot(
self,
episode :int,
scores: List[float],
avg_scores: List[float],
losses: List[float],
epsilons: List[float]
):
""" Plot the training process"""
plt.figure(figsize=(20, 5))
plt.subplot(141)
if len(avg_scores) > 0:
plt.title("Average reward per 100 episodes. Score: %s" % (avg_scores[-1]))
else:
plt.title("Average reward over 100 episodes.")
plt.plot([100 + i for i in range(len(avg_scores))], avg_scores)
plt.subplot(142)
plt.title("episode %s. Score: %s" % (episode, | np.mean(scores[-10:]) | numpy.mean |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 2 18:02:17 2016
@author: denis
"""
from math import pi
from itertools import islice
import numpy as np
import pandas as pd
import copy
import matplotlib.pyplot as plt
from pytrx.utils import z_str2num, z_num2str
import pkg_resources
from pytrx import hydro
from pytrx.transformation import Transformation
# from pytrx import transformation
from numba import njit, prange
from mpl_toolkits.mplot3d import Axes3D
class Molecule:
    """A set of atoms (element symbols + Cartesian coordinates) with optional
    structural transformations, pair-distribution and Debye-scattering
    helpers used for fitting solute structure to scattering data."""

    def __init__(self, Z, xyz,
                 calc_gr=False, rmin=0, rmax=25, dr=0.01,
                 associated_transformation=None, printing=True):
        '''
        Z - element symbol(s): a string or an array of strings.
        xyz - (natoms, 3) coordinate array; copied into both the working
        coordinates (self.xyz) and the reference geometry (self.xyz_ref).
        calc_gr - if True, compute pair distribution functions immediately.
        associated_transformation will be either a transformation class or
        a list of transformations
        '''
        if type(Z) == str:
            Z = np.array([Z])
        self.Z = Z
        self.Z_num = np.array([z_str2num(z) for z in Z])
        self.xyz = xyz.copy()
        self.xyz_ref = xyz.copy()
        self.printing = printing
        self.reparameterized = False
        # print(type(associated_transformation), Transformation)
        print("Running initial check up for associated_transformation")
        # Normalize associated_transformation to None or a list of
        # Transformation instances; reject anything else.
        if associated_transformation is None:
            self._associated_transformation = None
        elif type(associated_transformation) == list:
            if self.printing: print("associated_transformation is a list. Examining elements...")
            for t in associated_transformation:
                if self.printing: print(f'Checking {t}')
                assert issubclass(type(t), Transformation), 'List element is not a Transformation class'
            self._associated_transformation = associated_transformation
        elif issubclass(type(associated_transformation), Transformation):
            self._associated_transformation = [associated_transformation]
        else:
            raise TypeError('Supplied transformations must be None, a transformation class, or a list of it')
        # self.dispersed
        # self.dispersed = any([t.dw for t in self._associated_transformation])
        #
        self._t_keys = []  # list of transformation names - for internal use
        self.par0 = {}  # standard (starting) value for each parameter
        self.dispersed = False
        if self._associated_transformation is not None:
            for t in self._associated_transformation:
                # Let each transformation cache vectors/COMs for this geometry.
                t.prepare(self.xyz, self.Z_num)
                self._t_keys.append(t.name)
                self.par0[t.name] = t.amplitude0
                if t.dw:
                    # Debye-Waller-style dispersion adds extra parameters
                    # (one per suffix) on top of the amplitude.
                    self.dispersed = True
                    for key, value in zip(t.dw.suffix, t.dw.standard_value):
                        self.par0[t.name + key] = value
        self.n_par = len(self.par0.keys())
        if calc_gr: self.calcGR(rmin=rmin, rmax=rmax, dr=dr)

    def calcDistMat(self, return_mat=False):
        """Compute the (natoms, natoms) interatomic distance matrix."""
        self.dist_mat = np.sqrt(np.sum((self.xyz[None, :, :] -
                                        self.xyz[:, None, :]) ** 2, axis=2))
        if return_mat: return self.dist_mat

    def calcGR(self, rmin=0, rmax=25, dr=0.01):
        """Histogram interatomic distances into per-element-pair g(r)."""
        self.calcDistMat()
        self.gr = GR(self.Z, rmin=rmin, rmax=rmax, dr=dr)
        self.r = self.gr.r
        for pair in self.gr.el_pairs:
            el1, el2 = pair
            idx1, idx2 = (el1 == self.Z, el2 == self.Z)
            self.gr[pair] += np.histogram(self.dist_mat[np.ix_(idx1, idx2)].ravel(),
                                          self.gr.r_bins)[0]

    def reset_xyz(self):
        """Restore the working coordinates to the reference geometry."""
        self.xyz = self.xyz_ref.copy()  # as a numpy array we can just use the array's method

    def transform(self, par=None, return_xyz=False):
        '''
        Transforms xyz based on the transformation supplied in the _associated_transformation.
        Also takes the par which should be either None or a dict keyed by
        transformation name.

        The coordinates are reset to the reference geometry first, then each
        transformation is applied in sequence with its own amplitude.

        reprep: recalculate associated vectors, COMs, etc. after each step (as they might shift)
        by calling the prepare() methods within each class.
        '''
        if (par is not None) and (self._associated_transformation is not None):
            # Resets the coordinate set to be transformed
            # self.xyz = copy.deepcopy(self.xyz_ref)
            self.reset_xyz()
            # assert (len(par.keys()) == len(self._associated_transformation)), \
            #     "Number of parameters not matching number of transformations"
            for t in self._associated_transformation:
                self.xyz = t.transform(self.xyz, self.Z_num, par[t.name])
        if return_xyz:
            return self.xyz

    def s(self, q, pars=None):
        """Debye scattering signal of the molecule at parameters ``pars``.

        Atomic form factors are computed once and cached.  For dispersed
        transformations the signal is a weight-averaged sum over the outer
        product of each transformation's parameter distribution.
        """
        if not hasattr(self, '_atomic_formfactors'):
            self._atomic_formfactors = formFactor(q, self.Z)
        if pars is None:
            pars = self.par0
        else:
            # print(pars)
            # print(self.par0.keys())
            assert all([key in pars.keys() for key in self.par0.keys()]), \
                'the input parameter dict does not contain all necessary parameter keys'
        if self.reparameterized:
            # Physical parameters -> native transformation amplitudes.
            pars = self.convert(pars)
        if not self.dispersed:
            self.transform(pars)
            return Debye(q, self, f=self._atomic_formfactors)
        else:
            # Collect each transformation's parameter values (pd) and
            # weights (wd), then average over all grid combinations.
            pd = []
            wd = []
            for t in self._associated_transformation:
                if t.dw:
                    _p, _w = t.dw.disperse(pars, t.name)
                else:
                    _p, _w = pars[t.name], 1
                pd.append(_p)
                wd.append(_w)
            pd_grid = [i.ravel() for i in np.meshgrid(*pd)]
            wd_grid = [i.ravel() for i in np.meshgrid(*wd)]
            n = len(pd_grid[0])  # number of combinations
            # _bla = 0
            _s = np.zeros(q.shape)
            for i in range(n):
                _p_dict = {}
                _w = 1
                for j, key in enumerate(self._t_keys):
                    _p_dict[key] = pd_grid[j][i]
                    _w *= wd_grid[j][i]
                self.transform(_p_dict)
                _s += _w * Debye(q, self, f=self._atomic_formfactors)
            return _s

    def clash(self):
        # Check for clash by whether min distances between two atom types are shorter than 80 % of original (tentative)
        pass

    def write_xyz(self, fname):
        """Write the current (transformed) coordinates to an .xyz file."""
        # Write the xyz (NOT xyz_ref) to an xyz file
        with open(fname, 'w') as f:
            f.write(f'{len(self.Z)}')
            f.write(f'\nOutput of xyz for molecule\n')
            for i in range(len(self.Z)):
                f.write(f'{self.Z[i]} {self.xyz[i][0]} {self.xyz[i][1]} {self.xyz[i][2]}\n')
            f.write('\n')

    # def sum_parameters(self):
    #     if self._associated_transformation is not None:
    #         return len(self._associated_transformation)

    def calcDens(self):
        """Compute the Z1*Z2-weighted pair density from the g(r) set."""
        self.gr.calcDens()
        self.dens = self.gr.dens

    def reparameterize(self, par_new, roi_dict, n=11, plotting=False):
        """Switch fitting parameters to physical ones (distances/angles).

        Fits a linear map between the new parameters and the native
        transformation amplitudes over the region of interest.  Not
        compatible with dispersed transformations.
        """
        if self.dispersed:
            raise ValueError('dispersed transformations are incompatible with reparameterization')
        assert self.n_par == len(par_new), 'number of new parameters must match the number of original parameters'
        self._pc = ParameterConverter(self, par_new)
        self._pc.define_conversion(roi_dict, n, plotting=plotting)
        self.reparameterized = True
        # re-"brand" the parameters:
        self.reset_xyz()
        self.par0 = self._pc.compute_pars(return_type=dict)
        self._t_keys = list(self.par0.keys())

    def convert(self, x):
        """Convert physical parameters to native transformation amplitudes."""
        return self._pc.convert(x)
        # x_ar = np.array([x[key] for key in x.keys()])
        # x_ar = np.hstack((x_ar, [1]))
        # # print(x_ar, self.R.shape)
        #
        # y_out = x_ar @ self._pc.R
        # return dict(zip([t.name for t in self._associated_transformation], y_out))
class ParameterConverter:
    """Maps a Molecule's native transformation amplitudes onto physical
    structural parameters (inter-group distances and angles) via an affine
    least-squares fit, so fitting can be done in physical parameters.

    Attributes:
        mol: the Molecule being reparameterized.
        pars: list of parameter descriptors; each is a dict with keys
            'label', 'type' ('distance' or 'angle') and atom-index lists
            'group1' / 'group2'.
        t_labels: names of the molecule's native transformation parameters.
        R: affine conversion matrix (set by define_conversion).
    """

    def __init__(self, molecule, pars):
        self.mol = molecule
        self.pars = pars  # parameters to which we reparameterize
        self.t_labels = list(self.mol.par0.keys())
        self.R = None

    def compute_pars(self, return_type=list):
        """Evaluate the physical parameters on the molecule's current xyz.

        Args:
            return_type: ``list`` for plain values, or ``dict`` keyed by the
                descriptors' labels.
        """
        out = []
        for p in self.pars:
            if p['type'] == 'distance':
                # Distance between the centroids of the two atom groups.
                idx1, idx2 = p['group1'], p['group2']
                xyz1 = np.mean(self.mol.xyz[idx1, :], axis=0)
                xyz2 = np.mean(self.mol.xyz[idx2, :], axis=0)
                r = np.linalg.norm(xyz1 - xyz2)
                out.append(r)
            elif p['type'] == 'angle':
                # Angle (degrees) between the axes/normals of the two groups.
                idx1, idx2 = p['group1'], p['group2']
                n1 = self._get_normal(self.mol.xyz[idx1, :])
                n2 = self._get_normal(self.mol.xyz[idx2, :])
                phi = np.arccos(np.sum(n1 * n2))
                out.append(np.rad2deg(phi))
        if return_type == list:
            return out
        elif return_type == dict:
            return dict(zip([p['label'] for p in self.pars], out))

    def _get_normal(self, xyz):
        """Unit axis for a 2-atom group; otherwise the unit normal of the
        best-fit plane through the group (least-squares)."""
        if len(xyz) == 2:
            n = xyz[0, :] - xyz[1, :]
        else:
            n, _, _, _ = np.linalg.lstsq(xyz, np.ones(len(xyz)), rcond=-1)
        return n / np.linalg.norm(n)

    def compute_grid(self, roi, n):
        """Regular n-per-axis grid over the native-parameter ranges.

        Args:
            roi: dict mapping native parameter name to its (min, max) range.
            n: number of grid points per axis.

        Returns:
            List of raveled 1-D coordinate arrays, one per native parameter,
            ordered as ``self.t_labels``.
        """
        roi_grid = {}
        for key in roi.keys():
            x1, x2 = roi[key][0], roi[key][1]
            roi_grid[key] = np.linspace(x1, x2, n)
        grid = np.meshgrid(*[roi_grid[key] for key in self.t_labels])
        return [i.ravel() for i in grid]

    def define_conversion(self, roi, n, plotting=True):
        """Fit the affine map R from physical parameters to native amplitudes.

        Transforms the molecule at every grid point of the native
        parameters, evaluates the physical parameters there, and solves the
        least-squares problem [physical, 1] @ R = native.

        Args:
            roi: dict of (min, max) ranges per native parameter.
            n: grid points per axis.
            plotting: if True, show a 3-D diagnostic plot of the fit.
        """
        grid_out = self.compute_grid(roi, n)
        grid_in = []
        for vals in zip(*grid_out):
            _p = dict(zip(self.t_labels, vals))
            self.mol.transform(_p)
            out = self.compute_pars()
            grid_in.append(out)
        grid_in = np.array(grid_in)
        grid_out = np.array(grid_out).T
        # Append a column of ones so the fit includes a constant offset.
        grid_in = np.hstack((grid_in,
                             np.ones((grid_in.shape[0], 1))))
        self.R, _, _, _ = np.linalg.lstsq(grid_in, grid_out, rcond=-1)
        if plotting:
            grid_out_pred = grid_in @ self.R
            fig = plt.figure()
            plt.clf()
            # Figure.gca(projection=...) was deprecated in Matplotlib 3.4 and
            # removed in 3.6; add_subplot is the supported way to get 3-D axes.
            ax = fig.add_subplot(projection='3d')
            ax.plot(grid_in[:, 0], grid_in[:, 1], grid_out[:, 0], 'k.')
            ax.plot(grid_in[:, 0], grid_in[:, 1], grid_out_pred[:, 0], 'r.')

    def convert(self, x):
        """Convert a dict of physical parameters into native amplitudes
        using the fitted affine map R."""
        x_ar = np.array([float(x[key]) for key in x.keys() if key in self.mol._t_keys])
        x_ar = np.hstack((x_ar, [1]))
        y_out = x_ar @ self.R
        return dict(zip([t.name for t in self.mol._associated_transformation], y_out))
class GR:
    """Container for element-pair radial distribution functions g(r).

    Histograms are stored per unordered element pair (frozenset key), all on
    a common radial grid.  Arithmetic operators act element-pair-wise and
    return new GR instances on the same grid.
    """

    def __init__(self, Z, rmin=0, rmax=25, dr=0.01, r=None, el_pairs=None):
        """Create an empty set of pair distributions.

        Args:
            Z: array/list of element symbols (duplicates allowed; reduced
                with np.unique).
            rmin, rmax, dr: radial grid parameters, used when ``r`` is None.
            r: optional explicit radial grid; overrides rmin/rmax/dr.
            el_pairs: optional explicit list of element pairs; defaults to
                all unordered pairs of the unique elements.
        """
        self.Z = np.unique(Z)
        if el_pairs is None:
            self.el_pairs = [(z_i, z_j) for i, z_i in enumerate(self.Z) for z_j in self.Z[i:]]
        else:
            self.el_pairs = el_pairs
        if r is None:
            self.r = np.linspace(rmin, rmax, int((rmax - rmin) / dr) + 1)
        else:
            self.r = r
            rmin, rmax, dr = r.min(), r.max(), r[1] - r[0]
        # Bin edges shifted by half a step relative to the grid points,
        # matching the histogramming done in Molecule.calcGR.
        self.r_bins = np.linspace(float(rmin) + 0.5 * dr, float(rmax) + 0.5 * dr,
                                  int((float(rmax) - float(rmin)) / dr) + 1)
        self.gr = {}
        for pair in self.el_pairs:
            self.gr[frozenset(pair)] = np.zeros(self.r.size)

    def __setitem__(self, key, data):
        # Unordered access: ('O', 'H') and ('H', 'O') address the same entry.
        key = frozenset(key)
        self.gr[key] = data

    def __getitem__(self, key):
        key = frozenset(key)
        return self.gr[key]

    def __add__(self, gr_other):
        gr_out = GR(self.Z, r=self.r, el_pairs=self.el_pairs)
        for pair in self.el_pairs:
            gr_out[pair] = self[pair] + gr_other[pair]
        return gr_out

    def __sub__(self, gr_other):
        gr_out = GR(self.Z, r=self.r, el_pairs=self.el_pairs)
        for pair in self.el_pairs:
            gr_out[pair] = self[pair] - gr_other[pair]
        return gr_out

    def __mul__(self, factor):
        gr_out = GR(self.Z, r=self.r, el_pairs=self.el_pairs)
        for pair in self.el_pairs:
            gr_out[pair] = self[pair] * factor
        return gr_out

    def __truediv__(self, gr_other):
        gr_out = GR(self.Z, r=self.r, el_pairs=self.el_pairs)
        for pair in self.el_pairs:
            gr_out[pair] = self[pair] / gr_other[pair]
        return gr_out

    def calcDens(self):
        """Collapse all pair distributions into a Z1*Z2-weighted density."""
        self.dens = np.zeros(self.r.shape)
        for pair in self.el_pairs:
            el1, el2 = pair
            z1 = z_str2num(el1)
            z2 = z_str2num(el2)
            self.dens += z1 * z2 * self.gr[frozenset(pair)]

    def save(self, fname):
        """Write r and all pair distributions as delimited text with a
        'r, El1-El2, ...' header (readable by GRfromFile)."""
        n = self.r.size
        m = len(self.el_pairs)
        header = 'r, ' + ', '.join(['-'.join([i for i in pair]) for pair in self.el_pairs])
        data = np.zeros((n, m + 1))
        data[:, 0] = self.r
        for i, pair in enumerate(self.el_pairs):
            # All-NaN distributions are left as zero columns.
            if not np.all(np.isnan(self[pair])):
                data[:, i + 1] = self[pair]
        np.savetxt(fname, data, delimiter=', ', header=header)
### UTILS
def formFactor(q, Elements):
    '''
    Calculates atomic form-factor at value q

    q - np.array of scattering vector values
    Elements - np.array or list of elements. May be a string if one wants to
    compute form-factor for only one element.

    returns a dict of form factors (element symbol -> array over q)

    Coefficients are read from the f0_WaasKirf.dat table shipped with pytrx
    (5-Gaussian-plus-constant parameterization).

    Examples:
    q = np.arange(10)
    f = formFactor(q, 'Si')
    print(f['Si'])

    Elements = ['Si', 'O']
    f = formFactor(q, Elements)
    print(f['Si'], f['O'])
    '''
    Elements = np.unique(Elements)
    fname = pkg_resources.resource_filename('pytrx', './f0_WaasKirf.dat')
    with open(fname) as f:
        content = f.readlines()
    # Tables are parameterized in s = q / 4pi.
    s = q / (4 * pi)
    # f0(s) = sum_{i<5} a_i * exp(-b_i * s^2) + c
    formFunc = lambda sval, a: np.sum(a[None, :5] * np.exp(-a[None, 6:] * sval[:, None] ** 2), axis=1) + a[5]
    f = {}
    for i, x in enumerate(content):
        if x[0:2] == '#S':
            # '#S <num> <element>' marks a block; the coefficient line sits
            # three lines below the marker.
            atom = x.split()[-1]
            if any([atom == x for x in Elements]):
                coef = np.fromstring(content[i + 3], sep='\t')
                f[atom] = formFunc(s, coef)
    return f
def diff_cage_from_dgr(q, dgr, molecule, solvent_str, r_cut=None):
    """Solute-solvent ("cage") difference scattering from a difference g(r).

    Args:
        q: scattering vector grid.
        dgr: GR-like object holding difference pair distributions between
            solute and solvent elements.
        molecule: solute Molecule (per-element atom counts from molecule.Z).
        solvent_str: key into hydro.solvent_data.
        r_cut: optional hard radial cutoff applied to the transform kernel.

    Returns:
        Difference scattering signal on the q grid.
    """
    ff = formFactor(q, dgr.Z)
    s = np.zeros(q.shape)
    r = dgr.r
    w = np.ones(r.shape)
    if r_cut:
        # Hard cutoff window: g(r) beyond r_cut does not contribute.
        w[r > r_cut] = 0
    # else:
    #     w = np.exp(-0.5 * (r / r_damp) ** 2)
    ksi = q[:, None] * r[None, :]
    # Clip q*r so sin(ksi)/ksi stays finite at the origin.
    ksi[ksi < 1e-9] = 1e-9
    # w = np.exp(-0.5*(r/5)**2)
    # Discretized 4*pi*r^2*dr * sin(qr)/(qr) sine-transform kernel.
    Asin = 4 * np.pi * (r[1] - r[0]) * (np.sin(ksi) / ksi) * r[None, :] ** 2 * w
    solvent = hydro.solvent_data[solvent_str]
    # Volume per solvent molecule (Angstrom^3) from molar mass and density.
    V = solvent.molar_mass / 6.02e23 / (solvent.density / 1e30)
    for el1 in np.unique(molecule.Z):
        for el2 in np.unique(solvent.Z):
            el_pair = (el1, el2)
            if not np.all(dgr[el_pair] == 0):
                n1 = np.sum(molecule.Z == el1)
                n2 = np.sum(solvent.Z == el2)
                # print(el1, n1, el2, n2)
                _s = ff[el1] * ff[el2] * n1 * n2 / V * (Asin @ dgr[el_pair])
                s += _s
    return s
def diff_cave_from_dgr(q, dgr, solvent_str, r_damp=25):
    """Solvent-solvent ("cave") difference scattering from a difference g(r).

    Near-duplicate of diff_cage_from_dgr but with both elements drawn from
    the solvent and a hard window at ``r_damp``.

    Args:
        q: scattering vector grid.
        dgr: GR-like object with solvent-solvent difference distributions.
        solvent_str: key into hydro.solvent_data.
        r_damp: radial cutoff beyond which g(r) is zeroed out.

    Returns:
        Difference scattering signal on the q grid.
    """
    ff = formFactor(q, dgr.Z)
    s = np.zeros(q.shape)
    r = dgr.r
    ksi = q[:, None] * r[None, :]
    # Clip q*r so sin(ksi)/ksi stays finite at the origin.
    ksi[ksi < 1e-9] = 1e-9
    # w = np.exp(-0.5*(r/5)**2)
    w = np.ones(r.shape)
    w[r > r_damp] = 0
    # Discretized 4*pi*r^2*dr * sin(qr)/(qr) sine-transform kernel.
    Asin = 4 * np.pi * (r[1] - r[0]) * (np.sin(ksi) / ksi) * r[None, :] ** 2 * w
    solvent = hydro.solvent_data[solvent_str]
    # Volume per solvent molecule (Angstrom^3) from molar mass and density.
    V = solvent.molar_mass / 6.02e23 / (solvent.density / 1e30)
    for el1 in np.unique(solvent.Z):
        for el2 in np.unique(solvent.Z):
            el_pair = (el1, el2)
            if not np.all(dgr[el_pair] == 0):
                n1 = np.sum(solvent.Z == el1)
                n2 = np.sum(solvent.Z == el2)
                # print(el1, n1, el2, n2)
                _s = ff[el1] * ff[el2] * n1 * n2 / V * (Asin @ dgr[el_pair])
                s += _s
    return s
def GRfromFile(filename, delimiter=', ', normalize=False, rmin=25, rmax=30):
    """Load a GR object from a delimited text file (as written by GR.save).

    Column names of the form 'El1-El2' (or 'El1_El2') identify element
    pairs.  With ``normalize`` each g(r) is divided by its mean over
    [rmin, rmax] (tail normalization towards 1).

    NOTE(review): the first data row is skipped (``data[1:, ...]``) --
    presumably a header artifact of genfromtxt; confirm against files
    actually produced by GR.save.
    """
    names = np.genfromtxt(filename, delimiter=delimiter, names=True, deletechars=',').dtype.names
    data = np.genfromtxt(filename, delimiter=delimiter)
    # print(data)
    els = []
    el_pairs = []
    for name in names[1:]:
        # Accept both 'El1_El2' and 'El1-El2' column labels.
        new_pair = name.split('_')
        if len(new_pair) == 1:
            new_pair = name.split('-')
        new_pair = [str.capitalize(i) for i in new_pair]
        el_pairs.append([str.capitalize(i) for i in new_pair])
        els += new_pair
    els = np.unique(els)
    # print(els)
    # print(el_pairs)
    gr = GR(els)
    r = data[1:, 0]
    for i, pair in enumerate(el_pairs):
        gr_array = data[1:, i + 1]
        if normalize:
            # Normalize by the mean of the long-r tail (if non-zero).
            rsel = (r >= rmin) & (r <= rmax)
            c = np.mean(gr_array[rsel])
            if c != 0:
                gr_array /= c
        gr[pair] = gr_array
    gr.r = r
    return gr
def convert2rspace(q, dsq, alpha_damp=0.15, rmax=25, dr=0.01, molecule=None):
    """Sine-transform a difference signal dS(q) onto a real-space grid.

    Args:
        q: scattering vector grid.
        dsq: difference signal sampled on q.
        alpha_damp: Gaussian damping width applied in q.
        rmax, dr: extent and step of the output r grid.
        molecule: optional Molecule used to sharpen by its pair form-factor
            product (normalized to its maximum); uniform weighting if None.

    Returns:
        Tuple (r, transformed signal on r).
    """
    r_grid = np.arange(0, rmax + dr, dr)
    qr = r_grid[:, None] * q[None, :]
    # Guard the q*r -> 0 corner so downstream ratios stay finite.
    qr[qr < 1e-9] = 1e-9
    if molecule:
        sharpening = get_f_sharp_for_molecule(q, molecule)
        sharpening /= sharpening.max()
    else:
        sharpening = np.ones(q.shape)
    damping = np.exp(-(alpha_damp * q) ** 2)
    weights = q * damping / sharpening
    kernel = np.sin(qr) * weights[None, :]
    return r_grid, kernel @ dsq
def get_f_sharp_for_molecule(q, molecule):
    """Pairwise form-factor "sharpening" function of a molecule.

    Sums 2 * f_i(q) * f_j(q) over all distinct atom pairs (i < j), reusing
    the molecule's cached form factors when available.

    Args:
        q: scattering vector grid.
        molecule: object exposing ``Z`` (element symbols) and optionally a
            cached ``_atomic_formfactors`` dict.

    Returns:
        Array over q with the summed pair form-factor products.
    """
    if hasattr(molecule, '_atomic_formfactors'):
        ff = molecule._atomic_formfactors
    else:
        ff = formFactor(q, molecule.Z)
    f_sharp = np.zeros(q.size)
    for i, el_i in enumerate(molecule.Z):
        for el_j in molecule.Z[i + 1:]:
            f_sharp += 2 * ff[el_i] * ff[el_j]
    return f_sharp
def Debye(q, mol, f=None, atomOnly=False, debug=False):
    """Coherent scattering of a molecule via the Debye equation.

    Args:
        q: scattering vector grid.
        mol: Molecule-like object with ``Z`` and a ``calcDistMat`` method
            populating ``dist_mat``.
        f: optional precomputed form-factor dict; computed if None.
        atomOnly: if True, return only the atomic self-scattering term.
        debug: print intermediate values.

    Returns:
        Coherent scattering intensity on the q grid.
    """
    mol.calcDistMat()
    n_atoms = mol.Z.size
    if f is None:
        f = formFactor(q, mol.Z)
    if debug:
        print(f)
    # Per-atom form-factor lookup table, one row per atom.
    ff_rows = np.zeros((n_atoms, len(q)))
    for i in range(n_atoms):
        ff_rows[i] = f[mol.Z[i]]
    if atomOnly:
        # Self-scattering only: sum of squared atomic form factors.
        scattering = np.zeros(q.shape)
        for i in range(n_atoms):
            scattering += f[mol.Z[i]] ** 2
    else:
        scattering = Scoh_calc2(ff_rows, q, mol.dist_mat, n_atoms)
    if debug:
        print(scattering)
    return scattering
@njit
def Scoh_calc(FF, q, r, natoms):
    """Numba kernel: Debye sum over all atom pairs (serial version).

    Args:
        FF: (natoms, nq) per-atom form-factor table.
        q: scattering-vector grid.
        r: (natoms, natoms) interatomic distance matrix.
        natoms: number of atoms.

    Returns:
        Coherent scattering intensity on the q grid.
    """
    Scoh = np.zeros(q.shape)
    for idx1 in range(natoms):
        for idx2 in range(idx1 + 1, natoms):
            r12 = r[idx1, idx2]
            qr12 = q * r12
            # Guard qr -> 0 (q = 0 or coincident atoms) to avoid 0/0;
            # the parallel sibling Scoh_calc2 applies the same clip.
            qr12[qr12 < 1e-9] = 1e-9
            Scoh += 2 * FF[idx1] * FF[idx2] * np.sin(qr12) / qr12
        Scoh += FF[idx1] ** 2
    return Scoh
@njit(parallel=True)
def Scoh_calc2(FF, q, r, natoms):
    """Numba kernel: Debye sum over all atom pairs, parallelized over atoms.

    Each row of Scoh2 accumulates atom idx1's contribution (its
    self-scattering plus cross terms with higher-index atoms), so prange
    iterations never write to shared state; rows are summed at the end.
    """
    # Scoh = np.zeros(q.shape)
    Scoh2 = np.zeros((natoms, len(q)))
    for idx1 in prange(natoms):
        Scoh2[idx1] += FF[idx1] ** 2
        for idx2 in range(idx1 + 1, natoms):
            r12 = r[idx1, idx2]
            qr12 = q * r12
            # Avoid 0/0 at q*r -> 0 (sin(x)/x -> 1).
            qr12[qr12<1e-9] = 1e-9
            Scoh2[idx1] += 2 * FF[idx1] * FF[idx2] * np.sin(qr12) / qr12
    return np.sum(Scoh2, axis=0)
def DebyeFromGR(q, gr, f=None, rmax=None, cage=False):
    """Debye scattering computed from pair distribution functions.

    Args:
        q: scattering vector grid.
        gr: GR instance with pair histograms.
        f: optional precomputed form-factor dict; computed if None.
        rmax: radial cutoff; defaults to the full grid.
        cage: if True, like-element pairs are doubled as well
            (NOTE(review): presumably for asymmetric solute-solvent
            cross-histograms -- confirm with callers).

    Returns:
        Coherent scattering intensity on the q grid.
    """
    if f is None:
        f = formFactor(q, gr.Z)
    if rmax is None: rmax = gr.r.max()
    Scoh = np.zeros(q.shape)
    rsel = gr.r < rmax
    # sin(qr)/qr kernel; clip qr to avoid 0/0 at the origin.
    qr = q[:, None] * gr.r[None, rsel]
    qr[qr < 1e-6] = 1e-6
    Asin = np.sin(qr) / qr
    for pair in gr.el_pairs:
        el1, el2 = pair
        # print(Asin.shape, gr[pair].shape)
        pair_scat = f[el1] * f[el2] * (Asin @ gr[pair][rsel])
        if el1 == el2:
            if cage:
                Scoh += 2 * pair_scat
            else:
                Scoh += pair_scat
        else:
            # Unordered histograms store each unlike pair once; double it.
            Scoh += 2 * pair_scat
    return Scoh
def ScatFromDens(q, gr):
    """Sine-transform the Z-weighted pair density of a GR object onto q.

    Calls ``gr.calcDens()`` to (re)build the density, then applies the
    sin(qr)/qr kernel.

    Args:
        q: scattering vector grid.
        gr: GR instance (provides ``r`` and, after calcDens, ``dens``).

    Returns:
        Scattering signal on the q grid.
    """
    gr.calcDens()
    qr = gr.r[None, :] * q[:, None]
    # Clip so sin(qr)/qr is finite at qr -> 0.
    qr[qr < 1e-6] = 1e-6
    kernel = np.sin(qr) / qr
    return kernel @ gr.dens
def Compton(z, q):
fname_lowz = pkg_resources.resource_filename('pytrx', './Compton_lowZ.dat')
fname_highz = pkg_resources.resource_filename('pytrx', './Compton_highZ.dat')
data_lowz = pd.read_csv(fname_lowz, sep='\t')
data_highz = pd.read_csv(fname_highz, sep='\t')
data_lowz['Z'] = data_lowz['Z'].apply(lambda x: z_num2str(x))
data_highz['Z'] = data_highz['Z'].apply(lambda x: z_num2str(x))
Scoh = formFactor(q, z)[z] ** 2
z_num = z_str2num(z)
if z in data_lowz['Z'].values:
M, K, L = data_lowz[data_lowz['Z'] == z].values[0, 1:4]
S_inc = (z_num - Scoh / z_num) * (1 - M * (np.exp(-K * q / (4 * pi)) - np.exp(-L * q / (4 * pi))))
# S(idx_un(i),:) = (Z_un(i)-Scoh(idx_un(i),:)/Z_un(i)).*...
# (1-M*(exp(-K*Q/(4*pi))-exp(-L*Q/(4*pi))));
elif z in data_highz['Z'].values:
A, B, C = data_highz[data_highz['Z'] == z].values[0, 1:4]
S_inc = z_num * (1 - A / (1 + B * q / (4 * pi)) ** C)
# S(idx_un(i),:) = Z_un(i)*(1-A./(1+B*Q/(4*pi)).^C);
elif z == 'H':
S_inc = | np.zeros(q.shape) | numpy.zeros |
import platform
import numpy as np
import pytest
from qtpy import PYQT5
from qtpy.QtCore import QPoint, Qt
from qtpy.QtGui import QImage
import PartSegData
from PartSeg.common_backend.base_settings import BaseSettings, ColormapDict, ViewSettings
from PartSeg.common_gui.channel_control import ChannelProperty, ColorComboBox, ColorComboBoxGroup
from PartSeg.common_gui.napari_image_view import ImageView
from PartSegCore.color_image import color_image_fun
from PartSegCore.color_image.base_colors import starting_colors
from PartSegCore.image_operations import NoiseFilterType
from PartSegImage import TiffImageReader
from .utils import CI_BUILD
if PYQT5:
    def array_from_image(image: QImage):
        """Flatten a 24-bit RGB QImage into a 1-D uint8 array (PyQt5 API:
        bits() needs an explicit byte count via asstring)."""
        size = image.size().width() * image.size().height()
        return np.frombuffer(image.bits().asstring(size * 3), dtype=np.uint8)
else:
    def array_from_image(image: QImage):
        """Flatten a 24-bit RGB QImage into a 1-D uint8 array (non-PyQt5
        bindings: bits() supports the buffer protocol directly)."""
        size = image.size().width() * image.size().height()
        return np.frombuffer(image.bits(), dtype=np.uint8, count=size * 3)
def test_color_combo_box(qtbot):
    """ColorComboBox: visibility toggle, click signals and rendered colormap."""
    dkt = ColormapDict({})
    box = ColorComboBox(0, starting_colors, dkt)
    box.show()
    qtbot.add_widget(box)
    # Toggling the checkbox reports visibility but must not count as a click.
    with qtbot.waitSignal(box.channel_visible_changed):
        with qtbot.assertNotEmitted(box.clicked):
            qtbot.mouseClick(box.check_box, Qt.LeftButton)
    # Clicks near both horizontal edges of the widget emit `clicked`.
    with qtbot.waitSignal(box.clicked, timeout=1000):
        qtbot.mouseClick(box, Qt.LeftButton, pos=QPoint(5, 5))
    with qtbot.waitSignal(box.clicked):
        qtbot.mouseClick(box, Qt.LeftButton, pos=QPoint(box.width() - 5, 5))
    index = 3
    with qtbot.waitSignal(box.currentTextChanged):
        box.set_color(starting_colors[index])
    # The widget's preview must match a reference gradient rendered with the
    # same colormap.
    img = color_image_fun(
        np.linspace(0, 256, 512, endpoint=False).reshape((1, 512, 1)), [dkt[starting_colors[index]][0]], [(0, 255)]
    )
    assert np.all(array_from_image(box.image) == img.flatten())
class TestColorComboBox:
    """Visibility of the optional lock / blur / gamma controls."""

    def test_visibility(self, qtbot):
        """Each optional constructor argument shows its matching sub-widget."""
        dkt = ColormapDict({})
        box = ColorComboBox(0, starting_colors, dkt, lock=True)
        box.show()
        qtbot.add_widget(box)
        assert box.lock.isVisible()
        box = ColorComboBox(0, starting_colors, dkt, blur=NoiseFilterType.Gauss)
        box.show()
        qtbot.add_widget(box)
        assert box.blur.isVisible()
        box = ColorComboBox(0, starting_colors, dkt, gamma=2)
        box.show()
        qtbot.add_widget(box)
        assert box.gamma.isVisible()
class TestColorComboBoxGroup:
    def test_change_channels_num(self, qtbot):
        """Growing and shrinking the channel count must not crash the group."""
        settings = ViewSettings()
        box = ColorComboBoxGroup(settings, "test", height=30)
        qtbot.add_widget(box)
        box.set_channels(1)
        box.set_channels(4)
        box.set_channels(10)
        box.set_channels(4)
        box.set_channels(10)
        box.set_channels(2)
    def test_color_combo_box_group(self, qtbot):
        """Per-channel colors; an unchecked channel reports None as its color."""
        settings = ViewSettings()
        box = ColorComboBoxGroup(settings, "test", height=30)
        qtbot.add_widget(box)
        box.set_channels(3)
        assert len(box.current_colors) == 3
        assert all(map(lambda x: isinstance(x, str), box.current_colors))
        # Hiding channel 0 and changing its colormap both refresh coloring.
        with qtbot.waitSignal(box.coloring_update):
            box.layout().itemAt(0).widget().check_box.setChecked(False)
        with qtbot.waitSignal(box.coloring_update):
            box.layout().itemAt(0).widget().setCurrentIndex(2)
        assert box.current_colors[0] is None
        assert all(map(lambda x: isinstance(x, str), box.current_colors[1:]))
    def test_color_combo_box_group_and_color_preview(self, qtbot):
        """ChannelProperty edits fire group signals only for the active channel
        and only when they actually affect rendering."""
        settings = ViewSettings()
        ch_property = ChannelProperty(settings, "test")
        box = ColorComboBoxGroup(settings, "test", ch_property, height=30)
        qtbot.add_widget(box)
        qtbot.add_widget(ch_property)
        box.set_channels(3)
        box.set_active(1)
        # Range edits are inert while fixed-range mode is off.
        with qtbot.assert_not_emitted(box.coloring_update), qtbot.assert_not_emitted(box.change_channel):
            ch_property.minimum_value.setValue(10)
            ch_property.minimum_value.setValue(100)

        def check_parameters(name, index):
            # change_channel must report this group's name and active channel.
            return name == "test" and index == 1

        # Enabling fixed range and editing the bounds both trigger updates.
        with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
            box.change_channel, check_params_cb=check_parameters
        ):
            ch_property.fixed.setChecked(True)
        with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
            box.change_channel, check_params_cb=check_parameters
        ):
            ch_property.minimum_value.setValue(10)
        ch_property.maximum_value.setValue(10000)
        with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
            box.change_channel, check_params_cb=check_parameters
        ):
            ch_property.maximum_value.setValue(11000)
        with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
            box.change_channel, check_params_cb=check_parameters
        ):
            ch_property.fixed.setChecked(False)
        # Noise-filter selection and radius edits update only when the
        # selected filter actually uses the radius.
        with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
            box.change_channel, check_params_cb=check_parameters
        ):
            ch_property.use_filter.set_value(NoiseFilterType.Gauss)
        with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
            box.change_channel, check_params_cb=check_parameters
        ):
            ch_property.use_filter.set_value(NoiseFilterType.Median)
        ch_property.filter_radius.setValue(0.5)
        with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
            box.change_channel, check_params_cb=check_parameters
        ):
            ch_property.filter_radius.setValue(2)
        with qtbot.waitSignal(box.coloring_update), qtbot.waitSignal(
            box.change_channel, check_params_cb=check_parameters
        ):
            ch_property.use_filter.set_value(NoiseFilterType.No)
        # With filtering off, the radius no longer matters.
        with qtbot.assert_not_emitted(box.coloring_update), qtbot.assert_not_emitted(box.change_channel):
            ch_property.filter_radius.setValue(0.5)
@pytest.mark.xfail((platform.system() == "Windows") and CI_BUILD, reason="GL problem")
def test_image_view_integration(self, qtbot, tmp_path):
settings = BaseSettings(tmp_path)
ch_property = ChannelProperty(settings, "test")
image_view = ImageView(settings, ch_property, "test")
# image_view.show()
qtbot.addWidget(image_view)
qtbot.addWidget(ch_property)
image = TiffImageReader.read_image(PartSegData.segmentation_analysis_default_image)
with qtbot.waitSignals([settings.image_changed, image_view.image_added], timeout=10 ** 6):
settings.image = image
channels_num = image.channels
assert image_view.channel_control.channels_count == channels_num
image_view.viewer_widget.screenshot()
image1 = image_view.viewer_widget.canvas.render()
assert np.any(image1 != 255)
image_view.channel_control.set_active(1)
ch_property.minimum_value.setValue(100)
ch_property.maximum_value.setValue(10000)
ch_property.filter_radius.setValue(0.5)
image2 = image_view.viewer_widget.canvas.render()
assert np.any(image2 != 255)
assert np.all(image1 == image2)
def check_parameters(name, index):
return name == "test" and index == 1
# Test fixed range
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.fixed.setChecked(True)
image1 = image_view.viewer_widget.canvas.render()
assert np.any(image1 != 255)
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.minimum_value.setValue(20)
image2 = image_view.viewer_widget.canvas.render()
assert np.any(image2 != 255)
assert np.any(image1 != image2)
with qtbot.waitSignal(image_view.channel_control.coloring_update), qtbot.waitSignal(
image_view.channel_control.change_channel, check_params_cb=check_parameters
):
ch_property.maximum_value.setValue(11000)
image3 = image_view.viewer_widget.screenshot()
assert np.any(image3 != 255)
assert | np.any(image2 != image3) | numpy.any |
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import scipy.interpolate
import scipy.signal
import scipy.spatial
import scipy.stats
import sys
# %% Load data
case = int(sys.argv[1])
print("Case " + str(case))
suffix = '_uphavg'
basedata = np.load('/home/nc1472/git/qg-edgeofchaos/poincare_input/case{}_poincare_config_fd_smooth_uphavg.npz'.format(case))
qbar = basedata['qbar']
uy = basedata['uy']
nx = 2048
x = np.linspace(-np.pi, np.pi, num=nx, endpoint=False)
# Set up interpolation functions
pad = 4
xp = np.zeros(nx+2*pad)
xp[pad:-pad] = x
xp[:pad] = x[-pad:] - 2*np.pi
xp[-pad:] = x[:pad] + 2*np.pi
def circularInterpolant(vec):
    """Quadratic interpolant of a profile that is periodic on [-pi, pi).

    The profile is padded with ``pad`` wrapped-around samples on each side
    (matching the module-level padded grid ``xp``) so the interpolant is
    smooth across the periodic boundary.
    """
    vecp = np.zeros(nx+2*pad)
    vecp[pad:-pad] = vec
    # Wrap the ends around to enforce periodicity.
    vecp[:pad] = vec[-pad:]
    vecp[-pad:] = vec[:pad]
    return scipy.interpolate.interp1d(xp, vecp, kind='quadratic')
uyfft = np.fft.rfft(uy)
hilbuy = np.fft.irfft(1j*uyfft)
hilbuyf = circularInterpolant(hilbuy)
uyf = circularInterpolant(uy)
# Compute regions of zonal flow minima and maxima
uyminxs = x[scipy.signal.argrelextrema(uy, np.less)]
uymaxxs = x[scipy.signal.argrelextrema(uy, np.greater)]
# Set up function for computing correlation dimension
def fit_slope(lind, rind, psorted, bounds):
    """Log-log slope (correlation-dimension estimate) between two bounds.

    Samples 256 geometrically spaced 1-based indices between bounds[lind]
    and bounds[rind] and regresses log(ncorr) on log(psorted) there.

    NOTE(review): ``ncorr`` is a module-level global that is not defined
    above this point in the script -- confirm it is populated before the
    first call.
    """
    lbound = bounds[lind]
    ubound = bounds[rind]
    sampinds = np.array(list(map(lambda x: int(np.round(x)), np.geomspace(lbound, ubound, num=256))), dtype=int)
    result = scipy.stats.linregress(np.log(psorted[sampinds-1]), np.log(ncorr[sampinds-1]))
    return result
# Set up result arrays
nparticles = 127
allstdresids = | np.zeros((nparticles, 257)) | numpy.zeros |
import os
import math
import pandas as pd
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from data.utils import rgb2binary
from pathlib import Path
class ROIGenerator:
    def __init__(self, model):
        """Store the planning model and the RGB color coding used to paint
        ROI, start and goal markers onto map images (RGB values in [0, 1])."""
        self.model = model
        self.colors = {'roi': np.array([0, 1, 0]), 'start': np.array([0, 0, 1]), 'goal': np.array([1, 0, 0])}
def set_parameters(self, m_name=None, m_path='data/dataset/maps/', t_path='data/dataset/tasks/'):
self.m_name = m_name # with extantion
self.fname = Path(m_name).stem # without extantion
Map=Image.open(m_path + self.m_name).convert('RGB')
Map = | np.array(Map) | numpy.array |
import numpy as np
from hypernet.src.thermophysicalModels.chemistry.chemistryModel import Basic
class Standard(Basic):
# Initialization
###########################################################################
    def __init__(
        self,
        specieThermos,
        processFlags,
        reactionsList=None,
        *args,
        **kwargs
    ):
        """Standard chemistry model.

        Args:
            specieThermos: mapping from specie name to its thermo object;
                must contain the atomic specie named by ``self.atom``
                (provided by the Basic base class).
            processFlags: dict of booleans enabling the 'excit' and 'diss'
                processes.
            reactionsList: optional explicit reaction list forwarded to the
                base class.
        """
        super(Standard, self).__init__(
            specieThermos,
            processFlags,
            reactionsList=reactionsList,
            *args,
            **kwargs
        )
        # Mass of the atomic species, used to normalize the rate matrices.
        self.m = self.spTh[self.atom].specie.m
# Methods
###########################################################################
# Rates matrices ----------------------------------------------------------
    def K_(self, reac):
        """Mass-normalized rate matrices built from forward/reverse rates.

        Args:
            reac: reaction table providing 'kf' / 'kr' rate columns.

        Returns:
            Tuple (Ke, Kd, Kr): excitation, dissociation and recombination
            rate matrices.

        NOTE(review): mirrors dKdT_ with different column labels; the
        ``/ self.m**2 * 2`` scaling for recombination presumably reflects
        the two-atom process -- confirm against the model derivation.
        """
        labels = {
            'f': 'kf',
            'r': 'kr',
        }
        Ke = self.Ke_(self.processFlags['excit'], reac, labels) / self.m
        Kd = self.Kd_(self.processFlags['diss'], reac, labels) / self.m
        Kr = self.Kr_(self.processFlags['diss'], reac, labels) / self.m**2*2
        return Ke, Kd, Kr
# Rates matrices derivatives ----------------------------------------------
    def dKdT_(self, reac):
        """Temperature derivatives of the rate matrices.

        Same structure and scaling as K_, but reads the 'dkfdT' / 'dkrdT'
        derivative columns instead of the rates themselves.

        Args:
            reac: reaction table providing 'dkfdT' / 'dkrdT' columns.

        Returns:
            Tuple (dKedT, dKddT, dKrdT) of derivative matrices.
        """
        labels = {
            'f': 'dkfdT',
            'r': 'dkrdT',
        }
        dKedT = self.Ke_(self.processFlags['excit'], reac, labels) / self.m
        dKddT = self.Kd_(self.processFlags['diss'], reac, labels) / self.m
        dKrdT = self.Kr_(self.processFlags['diss'], reac, labels) / self.m**2*2
        return dKedT, dKddT, dKrdT
# Porcesses matrices ------------------------------------------------------
def Ke_(self, mask, reac, labels):
'''Excit. & Relax. rates matrix'''
# Construct Excit. & Relax. matrix
K = np.zeros((self.nSpecies,self.nSpecies), dtype=np.float64)
if mask:
# Get excitation/relaxation rates
reac = reac.loc[reac['reacIndex'].isin(self.processIndices['excit'])]
# Fill matrix
for i, row in reac.iterrows():
l, r = row['indices']
K[l,r] = K[l,r] + row[labels['f']]
K[r,l] = K[r,l] + row[labels['r']]
# Manipulate matrix
K = -np.diag( | np.sum(K, axis=1) | numpy.sum |
# Author: <NAME> <<EMAIL>>
# My imports
from . import tools
# Regular imports
from mir_eval.transcription import precision_recall_f1_overlap as evaluate_notes
from mir_eval.multipitch import evaluate as evaluate_frames
from abc import abstractmethod
from scipy.stats import hmean
from copy import deepcopy
import numpy as np
import sys
import os
# Machine epsilon - added to denominators and harmonic-mean inputs below
# to avoid division by zero when counts are empty
EPSILON = sys.float_info.epsilon
# TODO - add warning when unpack returns None
# TODO - none of the stacked evaluators have been tested independently
# - they will likely break during append, average, log, write, etc.
##################################################
# HELPER FUNCTIONS / RESULTS DICTIONARY #
##################################################
def average_results(results):
    """
    Obtain the average across all tracked results for each metric
    in a results dictionary.

    Parameters
    ----------
    results : dictionary
      Dictionary containing results of tracks arranged by metric

    Returns
    ----------
    average : dictionary
      Dictionary with a single value for each metric
    """

    # Work on a deep copy so the caller's tracked results stay untouched
    average = deepcopy(results)

    for metric in average:
        entry = average[metric]
        if isinstance(entry, dict):
            # Nested results - descend recursively
            average[metric] = average_results(entry)
        elif isinstance(entry, (np.ndarray, list)):
            # Collapse the tracked values into their mean,
            # cast to float (necessary for the logger)
            average[metric] = float(np.mean(entry))

    return average
def append_results(tracked_results, new_results):
    """
    Combine two results dictionaries. This function is more general than
    the signature suggests.

    Parameters
    ----------
    tracked_results and new_results : dictionary
      Dictionaries containing results of tracks arranged by metric

    Returns
    ----------
    tracked_results : dictionary
      Dictionary with all results appended along the metric
    """

    # Work on a deep copy so the caller's dictionary stays untouched
    tracked_results = deepcopy(tracked_results)

    for metric, entry in new_results.items():
        if metric not in tracked_results:
            # Untracked metric - adopt the new entry as-is
            tracked_results[metric] = entry
        elif isinstance(entry, dict):
            # Nested results - merge recursively
            tracked_results[metric] = append_results(tracked_results[metric], entry)
        else:
            # Concatenate the new value(s) onto the tracked entry
            tracked_results[metric] = np.append(tracked_results[metric], entry)

    return tracked_results
def log_results(results, writer, step=0, patterns=None, tag=''):
    """
    Log results using TensorBoardX.

    Parameters
    ----------
    results : dictionary
      Dictionary containing results of tracks arranged by metric
    writer : tensorboardX.SummaryWriter
      Writer object being used to log results
    step : int
      Current iteration in whatever process (e.g. training)
    patterns : list of string or None (optional)
      Only write metrics containing these patterns (e.g. ['f1', 'pr']) (None for all metrics)
    tag : string
      Tag for organizing different types of results (e.g. 'validation')
    """

    for metric, entry in results.items():
        if isinstance(entry, dict):
            # Nested results - recurse with the metric name appended to the tag
            log_results(entry, writer, step, patterns, f'{tag}/{metric}')
        elif pattern_match(metric, patterns) or patterns is None:
            # Scalar result matching the requested patterns - log it
            writer.add_scalar(f'{tag}/{metric}', entry, global_step=step)
def write_results(results, file, patterns=None, verbose=False):
    """
    Write a results dictionary to a text file.

    Parameters
    ----------
    results : dictionary
      Dictionary containing results of tracks arranged by metric
    file : TextIOWrapper
      File open in write mode
    patterns : list of string or None (optional)
      Only write metrics containing these patterns (e.g. ['f1', 'pr']) (None for all metrics)
    verbose : bool
      Whether to print to console whatever is written to the file
    """

    for metric in results.keys():
        entry = results[metric]
        if isinstance(entry, dict):
            # Nested results - header, recurse, then a spacer line
            tools.write_and_print(file, f'-----{metric}-----', verbose, '\n')
            write_results(entry, file, patterns, verbose)
            tools.write_and_print(file, '', verbose, '\n')
        elif pattern_match(metric, patterns) or patterns is None:
            # Scalar result matching the requested patterns
            tools.write_and_print(file, f' {metric} : {entry}', verbose, '\n')

    # Trailing spacer line after each (sub-)dictionary
    tools.write_and_print(file, '', verbose, '\n')
def pattern_match(query, patterns=None):
    """
    Simple helper function to see if a query matches a list of strings, even if partially.

    Parameters
    ----------
    query : string
      String to check for matches
    patterns : list of string or None (optional)
      Patterns to reference, return False if unspecified

    Returns
    ----------
    match : bool
      Whether the query matches some pattern, fully or partially
    """

    if patterns is None:
        # Nothing to match against
        return False

    # Partial containment in any pattern counts as a match
    return any(p in query for p in patterns)
##################################################
# EVALUATORS #
##################################################
class Evaluator(object):
    """
    Implements a generic music information retrieval evaluator.
    """

    def __init__(self, key, save_dir, patterns, verbose):
        """
        Initialize parameters common to all evaluators and instantiate.

        Parameters
        ----------
        key : string
          Key to use when unpacking data and organizing results
        save_dir : string or None (optional)
          Directory where results for each track will be written
        patterns : list of string or None (optional)
          Only write/log metrics containing these patterns (e.g. ['f1', 'pr']) (None for all metrics)
        verbose : bool
          Whether to print any written text to console as well
        """

        self.key = key

        # Delegate to the setters so directory creation lives in one place
        self.save_dir = None
        self.set_save_dir(save_dir)

        self.patterns = None
        self.set_patterns(patterns)

        self.verbose = None
        self.set_verbose(verbose)

        # Initialize dictionary to track results
        self.results = None
        self.reset_results()

    def set_save_dir(self, save_dir):
        """
        Simple helper function to set and create a new save directory.

        Parameters
        ----------
        save_dir : string or None (optional)
          Directory where estimates for each track will be written
        """

        self.save_dir = save_dir

        if self.save_dir is not None:
            # Create the specified directory if it does not already exist
            os.makedirs(self.save_dir, exist_ok=True)

    def set_patterns(self, patterns):
        """
        Simple helper function to set new patterns.

        Parameters
        ----------
        patterns : list of string or None (optional)
          Only write/log metrics containing these patterns (e.g. ['f1', 'pr']) (None for all metrics)
        """

        self.patterns = patterns

    def set_verbose(self, verbose):
        """
        Simple helper function to set a new verbose flag.

        Parameters
        ----------
        verbose : bool
          Whether to print any written text to console as well
        """

        self.verbose = verbose

    def reset_results(self):
        """
        Reset tracked results to an empty dictionary.
        """

        self.results = dict()

    def average_results(self):
        """
        Return the average of the currently tracked results.

        Returns
        ----------
        average : dictionary
          Dictionary with a single value for each metric
        """

        # Average the tracked results (module-level helper)
        average = average_results(self.results)

        return average

    def get_key(self):
        """
        Obtain the key being used for the Evaluator.

        Returns
        ----------
        key : string
          Key to use when unpacking data and organizing results
        """

        if self.key is None:
            # Default the key
            key = self.get_default_key()
        else:
            # Use the provided key
            key = self.key

        return key

    @staticmethod
    @abstractmethod
    def get_default_key():
        """
        Provide the default key to use in the event no key was provided.
        """

        # BUG FIX: previously this *returned* NotImplementedError, silently
        # handing the exception class back to callers instead of failing
        raise NotImplementedError

    def unpack(self, data):
        """
        Unpack the relevant entry for evaluation if
        a dictionary is provided and the entry exists.

        Parameters
        ----------
        data : object
          Presumably either a dictionary containing ground-truth
          or model output, or the already-unpacked entry

        Returns
        ----------
        data : object
          Unpacked entry or same object provided if no dictionary
        """

        # Determine the relevant key for evaluation
        key = self.get_key()

        # Check if a dictionary was provided and if the key is in the dictionary
        data = tools.try_unpack_dict(data, key)

        return data

    def pre_proc(self, estimated, reference):
        """
        Handle both dictionary input as well as relevant input for
        both estimated and reference data.

        Note: This method can be overridden in order to insert extra steps.

        Parameters
        ----------
        estimated : object
          Dictionary containing model output or the already-unpacked entry
        reference : object
          Dictionary containing ground-truth or the already-unpacked entry

        Returns
        ----------
        estimated : object
          Estimate relevant to the evaluation
        reference : object
          Reference relevant to the evaluation
        """

        # Unpack estimate and reference if dictionaries were provided
        estimated = self.unpack(estimated)
        reference = self.unpack(reference)

        return estimated, reference

    @abstractmethod
    def evaluate(self, estimated, reference):
        """
        Evaluate an estimate with respect to a reference.

        Parameters
        ----------
        estimated : object
          Estimate relevant to the evaluation or the dictionary containing it
        reference : object
          Reference relevant to the evaluation or the dictionary containing it
        """

        # BUG FIX: raise instead of returning the exception class
        raise NotImplementedError

    def write(self, results, track=None):
        """
        Write the results dictionary to a text file if a save directory was specified.

        Parameters
        ----------
        results : dictionary
          Dictionary containing results of tracks arranged by metric
        track : string
          Name of the track being processed
        """

        if self.save_dir is not None:
            # Determine how to name the results
            tag = tools.get_tag(track)

            if self.verbose:
                # Print the track name to console as a header to the results
                print(f'Evaluating track: {tag}')

            # Construct a path for the results
            results_path = os.path.join(self.save_dir, f'{tag}.{tools.TXT_EXT}')

            # Make sure all directories exist (there can be directories in the track name)
            os.makedirs(os.path.dirname(results_path), exist_ok=True)

            # Open a file at the path with writing permissions
            with open(results_path, 'w') as results_file:
                # Write the results to a text file
                write_results(results, results_file, self.patterns, self.verbose)

    def get_track_results(self, estimated, reference, track=None):
        """
        Calculate the results, write them, and track them within the evaluator.

        Parameters
        ----------
        estimated : object
          Estimate relevant to the evaluation or the dictionary containing it
        reference : object
          Reference relevant to the evaluation or the dictionary containing it
        track : string
          Name of the track being processed

        Returns
        ----------
        results : dictionary
          Dictionary containing results of tracks arranged by metric
        """

        # Make sure the estimated and reference data are unpacked
        estimated, reference = self.pre_proc(estimated, reference)

        # Calculate the results
        results = self.evaluate(estimated, reference)

        # Add the results to the tracked dictionary
        self.results = append_results(self.results, results)

        # Write the results
        self.write(results, track)

        return results

    def finalize(self, writer, step=0):
        """
        Log the averaged results using TensorBoardX and reset the results tracking.

        Parameters
        ----------
        writer : tensorboardX.SummaryWriter
          Writer object being used to log results
        step : int
          Current iteration in whatever process (e.g. training)
        """

        # Average the currently tracked results
        average = self.average_results()

        # Log the currently tracked results
        log_results(average, writer, step, patterns=self.patterns, tag=tools.VAL)

        # Reset the tracked results
        self.reset_results()
class ComboEvaluator(Evaluator):
    """
    Packages multiple evaluators into one module.
    """

    def __init__(self, evaluators, save_dir=None, patterns=None, verbose=False):
        """
        Initialize parameters for the evaluator.

        Parameters
        ----------
        See Evaluator class...

        evaluators : list of Evaluator
          All of the evaluators to run
        """

        # Must be set before the parent constructor runs, since
        # Evaluator.__init__ calls our overridden reset_results()
        self.evaluators = evaluators

        super().__init__(None, save_dir, patterns, verbose)

    def reset_results(self):
        """
        Reset tracked results of each evaluator in the collection.
        """

        # Loop through the evaluators
        for evaluator in self.evaluators:
            # Reset the respective results dictionary so it is empty
            evaluator.reset_results()

    def average_results(self):
        """
        Return the average of the currently tracked results across all evaluators.

        Returns
        ----------
        average : dictionary
          Dictionary with results dictionary entries for each evaluator
        """

        # Initialize an empty dictionary for the average results
        average = dict()

        # Loop through the evaluators
        for evaluator in self.evaluators:
            # Average the tracked results for the evaluator
            # and place in average results under evaluator's key
            results = average_results(evaluator.results)

            # Check if there is already an entry for the evaluator's key
            if tools.query_dict(average, evaluator.get_key()):
                # Add new entries to the results
                average[evaluator.get_key()].update(results)
            else:
                # Create a new entry for the results
                average[evaluator.get_key()] = results

        return average

    @staticmethod
    @abstractmethod
    def get_default_key():
        """
        This should not be called directly on a ComboEvaluator.
        """

        # BUG FIX: raise instead of returning the exception class
        raise NotImplementedError

    @abstractmethod
    def evaluate(self, estimated, reference):
        """
        This should not be called directly on a ComboEvaluator.
        """

        # BUG FIX: raise instead of returning the exception class
        raise NotImplementedError

    def get_track_results(self, estimated, reference, track=None):
        """
        Very similar to parent method, except file is written after results are
        calculated for each evaluator and packaged into a single dictionary.

        Parameters
        ----------
        estimated : object
          Estimate relevant to the evaluation or the dictionary containing it
        reference : object
          Reference relevant to the evaluation or the dictionary containing it
        track : string
          Name of the track being processed

        Returns
        ----------
        results : dictionary
          Dictionary containing results of tracks arranged by metric
        """

        # Initialize an empty dictionary to collect each evaluator's results
        results = {}

        # Loop through the evaluators
        for evaluator in self.evaluators:
            # Make sure the estimated and reference data are unpacked
            estimated_, reference_ = evaluator.pre_proc(estimated, reference)

            # Calculate the results
            new_results = evaluator.evaluate(estimated_, reference_)

            # Check if there is already an entry for the evaluator's key
            if tools.query_dict(results, evaluator.get_key()):
                # Add new entries to the results
                results[evaluator.get_key()].update(new_results)
            else:
                # Create a new entry for the results
                results[evaluator.get_key()] = new_results

            # Add the results to the tracked dictionary
            evaluator.results = append_results(evaluator.results, new_results)

        # Write the combined results once, after all evaluators ran
        self.write(results, track)

        return results
class LossWrapper(Evaluator):
    """
    Simple wrapper for tracking, writing, and logging loss.
    """

    def __init__(self, key=None, save_dir=None, patterns=None, verbose=False):
        """
        Initialize parameters for the evaluator.

        Parameters
        ----------
        See Evaluator class...
        """

        super().__init__(key, save_dir, patterns, verbose)

    @staticmethod
    def get_default_key():
        """
        Default key for loss.
        """

        return tools.KEY_LOSS

    def evaluate(self, estimated, reference=None):
        """
        Simply return the loss in a new results dictionary.

        Parameters
        ----------
        estimated : ndarray
          Single loss value in a NumPy array
        reference : irrelevant
          Unused - present only to satisfy the evaluator interface

        Returns
        ----------
        results : dict
          Dictionary containing loss
        """

        # The loss is already in the expected format - pass it straight through
        return estimated
class StackedMultipitchEvaluator(Evaluator):
    """
    Implements an evaluator for stacked multi pitch activation maps, i.e.
    independent multi pitch estimations across degrees of freedom or instruments.
    """

    def __init__(self, key=None, save_dir=None, patterns=None, verbose=False):
        """
        Initialize parameters for the evaluator.

        Parameters
        ----------
        See Evaluator class...
        """

        super().__init__(key, save_dir, patterns, verbose)

    @staticmethod
    def get_default_key():
        """
        Default key for multi pitch activation maps.
        """

        return tools.KEY_MULTIPITCH

    def evaluate(self, estimated, reference):
        """
        Evaluate a stacked multi pitch estimate with respect to a reference.

        Parameters
        ----------
        estimated : ndarray (S x F x T)
          Array of multiple discrete pitch activation maps
          S - number of slices in stack
          F - number of discrete pitches
          T - number of frames
        reference : ndarray (S x F x T)
          Array of multiple discrete pitch activation maps
          Dimensions same as estimated

        Returns
        ----------
        results : dict
          Dictionary containing precision, recall, and f-measure
        """

        # Collapse the pitch and frame dimensions, keeping the slice dimension
        target_shape = estimated.shape[:-2] + (-1,)
        est_flat = estimated.reshape(target_shape)
        ref_flat = reference.reshape(target_shape)

        # Correct predictions are where estimate and reference are both active
        true_positives = (est_flat * ref_flat).sum(axis=-1)

        # Per-slice activation counts for estimate and reference
        predicted_total = est_flat.sum(axis=-1)
        reference_total = ref_flat.sum(axis=-1)

        # Precision and recall (EPSILON guards against empty slices)
        precision = true_positives / (predicted_total + EPSILON)
        recall = true_positives / (reference_total + EPSILON)

        # F1 via the harmonic mean formula
        f_measure = hmean([precision + EPSILON, recall + EPSILON]) - EPSILON

        return {
            tools.KEY_PRECISION : precision,
            tools.KEY_RECALL : recall,
            tools.KEY_F1 : f_measure
        }
class MultipitchEvaluator(StackedMultipitchEvaluator):
    """
    Implements an evaluator for multi pitch activation maps.
    """

    def __init__(self, key=None, save_dir=None, patterns=None, verbose=False):
        """
        Initialize parameters for the evaluator.

        Parameters
        ----------
        See Evaluator class...
        """

        super().__init__(key, save_dir, patterns, verbose)

    def evaluate(self, estimated, reference):
        """
        Evaluate a multi pitch estimate with respect to a reference.

        Parameters
        ----------
        estimated : ndarray (F x T)
          Predicted discrete pitch activation map
          F - number of discrete pitches
          T - number of frames
        reference : ndarray (F x T)
          Ground-truth discrete pitch activation map
          Dimensions same as estimated

        Returns
        ----------
        results : dict
          Dictionary containing precision, recall, and f-measure
        """

        # Multi pitch is a single-slice special case of stacked multi pitch
        stacked_est = tools.multi_pitch_to_stacked_multi_pitch(estimated)
        stacked_ref = tools.multi_pitch_to_stacked_multi_pitch(reference)

        # Defer to the parent, then collapse the extraneous
        # degree-of-freedom dimension by averaging
        return average_results(super().evaluate(stacked_est, stacked_ref))
class StackedNoteEvaluator(Evaluator):
    """
    Implements an evaluator for stacked (independent) note estimations.
    """

    def __init__(self, offset_ratio=None, key=None, save_dir=None, patterns=None, verbose=False):
        """
        Initialize parameters for the evaluator.

        Parameters
        ----------
        See Evaluator class...

        offset_ratio : float
          Ratio of the reference note's duration used to define the offset tolerance
        """

        super().__init__(key, save_dir, patterns, verbose)

        self.offset_ratio = offset_ratio

    @staticmethod
    def get_default_key():
        """
        Default key for notes.
        """

        return tools.KEY_NOTES

    def unpack(self, data):
        """
        Unpack notes using the default notes key rather than the specified key.

        Parameters
        ----------
        data : object
          Presumably either a dictionary containing ground-truth
          or model output, or the already-unpacked notes

        Returns
        ----------
        data : object
          Unpacked notes or same object provided if no dictionary
        """

        # Determine the relevant key for evaluation
        key = self.get_default_key()

        # Check if a dictionary was provided and if the key is in the dictionary
        data = tools.try_unpack_dict(data, key)

        return data

    def evaluate(self, estimated, reference):
        """
        Evaluate stacked note estimates with respect to a reference.

        Parameters
        ----------
        estimated : dict
          Dictionary containing (slice -> (pitches, intervals)) pairs
        reference : dict
          Dictionary containing (slice -> (pitches, intervals)) pairs

        Returns
        ----------
        results : dict
          Dictionary containing precision, recall, and f-measure
        """

        # Initialize empty arrays to hold results for each degree of freedom
        precision, recall, f_measure = np.empty(0), np.empty(0), np.empty(0)

        # Loop through the stack of notes
        # NOTE(review): assumes estimated and reference contain the same slice keys
        for key in estimated.keys():
            # Extract the loose note groups from the stack
            # BUG FIX: the estimate was previously unpacked into the reference
            # variables and vice versa, which swapped precision and recall
            pitches_est, intervals_est = estimated[key]
            pitches_ref, intervals_ref = reference[key]

            # Convert notes to Hertz
            pitches_ref = tools.notes_to_hz(pitches_ref)
            pitches_est = tools.notes_to_hz(pitches_est)

            # Calculate frame-wise precision, recall, and f1 score with or without offset
            p, r, f, _ = evaluate_notes(ref_intervals=intervals_ref,
                                        ref_pitches=pitches_ref,
                                        est_intervals=intervals_est,
                                        est_pitches=pitches_est,
                                        offset_ratio=self.offset_ratio)

            # Add the results to the respective array
            precision = np.append(precision, p)
            recall = np.append(recall, r)
            f_measure = np.append(f_measure, f)

        # Package the results into a dictionary
        results = {
            tools.KEY_PRECISION : precision,
            tools.KEY_RECALL : recall,
            tools.KEY_F1 : f_measure
        }

        return results
class NoteEvaluator(StackedNoteEvaluator):
    """
    Implements an evaluator for notes.
    """

    def __init__(self, offset_ratio=None, key=None, save_dir=None, patterns=None, verbose=False):
        """
        Initialize parameters for the evaluator.

        Parameters
        ----------
        See StackedNoteEvaluator class...
        """

        super().__init__(offset_ratio, key, save_dir, patterns, verbose)

    def evaluate(self, estimated, reference):
        """
        Evaluate note estimates with respect to a reference.

        Parameters
        ----------
        estimated : ndarray (N x 3)
          Array of estimated note intervals and pitches by row
          N - number of notes
        reference : ndarray (N x 3)
          Array of ground-truth note intervals and pitches by row
          N - number of notes

        Returns
        ----------
        results : dict
          Dictionary containing precision, recall, and f-measure
        """

        # Batched notes -> loose note groups -> single-slice stacks
        stacked_est = tools.notes_to_stacked_notes(*tools.batched_notes_to_notes(estimated))
        stacked_ref = tools.notes_to_stacked_notes(*tools.batched_notes_to_notes(reference))

        # Notes are a single-slice special case of stacked notes, so defer to
        # the parent and collapse the extraneous dimension by averaging
        return average_results(super().evaluate(stacked_est, stacked_ref))
class StackedPitchListEvaluator(Evaluator):
    """
    Implements an evaluator for stacked (independent) pitch list estimations.

    This is equivalent to the discrete multi pitch evaluation protocol for
    discrete estimates, but is more general and works for continuous pitch estimations.
    """

    def __init__(self, key=None, save_dir=None, patterns=None, verbose=False):
        """
        Initialize parameters for the evaluator.

        Parameters
        ----------
        See Evaluator class...
        """

        super().__init__(key, save_dir, patterns, verbose)

    @staticmethod
    def get_default_key():
        """
        Default key for pitch lists.
        """

        return tools.KEY_PITCHLIST

    def evaluate(self, estimated, reference):
        """
        Evaluate stacked pitch list estimates with respect to a reference.

        Parameters
        ----------
        estimated : dict
          Dictionary containing (slice -> (times, pitch_list)) pairs
        reference : dict
          Dictionary containing (slice -> (times, pitch_list)) pairs

        Returns
        ----------
        results : dict
          Dictionary containing precision, recall, and f-measure
        """

        # Initialize empty arrays to hold results for each degree of freedom
        precision, recall, f_measure = np.empty(0), np.empty(0), np.empty(0)

        # Loop through the stack of pitch lists
        # NOTE(review): assumes estimated and reference contain the same slice keys
        for key in estimated.keys():
            # Extract the pitch lists from the stack
            # BUG FIX: the estimate was previously unpacked into the reference
            # variables and vice versa, which swapped precision and recall
            times_est, pitches_est = estimated[key]
            times_ref, pitches_ref = reference[key]

            # Convert pitch lists to Hertz
            pitches_ref = tools.pitch_list_to_hz(pitches_ref)
            pitches_est = tools.pitch_list_to_hz(pitches_est)

            # Calculate frame-wise precision, recall, and f1 score for continuous pitches
            frame_metrics = evaluate_frames(times_ref, pitches_ref, times_est, pitches_est)

            # Extract observation-wise precision and recall
            p, r = frame_metrics['Precision'], frame_metrics['Recall']

            # Calculate the f1-score using the harmonic mean formula
            f = hmean([p + EPSILON, r + EPSILON]) - EPSILON

            # Add the results to the respective array
            precision = np.append(precision, p)
            recall = np.append(recall, r)
            f_measure = np.append(f_measure, f)

        # Package the results into a dictionary
        results = {
            tools.KEY_PRECISION : precision,
            tools.KEY_RECALL : recall,
            tools.KEY_F1 : f_measure
        }

        return results
class PitchListEvaluator(StackedPitchListEvaluator):
    """
    Evaluates pitch list estimates against a reference.

    This is equivalent to the discrete multi pitch evaluation protocol for
    discrete estimates, but is more general and works for continuous pitch estimations.
    """

    def __init__(self, key=None, save_dir=None, patterns=None, verbose=False):
        """
        Initialize parameters for the evaluator.

        Parameters
        ----------
        See Evaluator class...
        """

        super().__init__(key, save_dir, patterns, verbose)

    def evaluate(self, estimated, reference):
        """
        Evaluate pitch list estimates with respect to a reference.

        Parameters
        ----------
        estimated : tuple of (times, pitch_list)
          times : ndarray (N) - time in seconds of beginning of each frame
          pitch_list : list of ndarray (N x [...]) - pitches active per frame
        reference : tuple of (times, pitch_list)
          Same layout as estimated

        Returns
        ----------
        results : dict
          Dictionary containing precision, recall, and f-measure
        """

        # A pitch list is a single-slice special case of a stacked pitch list
        stacked_est = tools.pitch_list_to_stacked_pitch_list(*estimated)
        stacked_ref = tools.pitch_list_to_stacked_pitch_list(*reference)

        # Defer to the parent, then collapse the extraneous
        # degree-of-freedom dimension by averaging
        return average_results(super().evaluate(stacked_est, stacked_ref))
class TablatureEvaluator(Evaluator):
    """
    Implements an evaluator for tablature.
    """

    def __init__(self, profile, key=None, save_dir=None, patterns=None, verbose=False):
        """
        Initialize parameters for the evaluator.

        Parameters
        ----------
        See Evaluator class for others...

        profile : InstrumentProfile (instrument.py)
          Instrument profile detailing experimental setup
        """

        super().__init__(key, save_dir, patterns, verbose)

        self.profile = profile

    @staticmethod
    def get_default_key():
        """
        Default key for tablature.
        """

        return tools.KEY_TABLATURE

    def pre_proc(self, estimated, reference):
        """
        By default, we anticipate neither estimate
        or reference to be in stacked multi pitch format.

        TODO - do something similar for pitch list wrapper reference

        Parameters
        ----------
        estimated : object
          Dictionary containing model output or the already-unpacked entry
        reference : object
          Dictionary containing ground-truth or the already-unpacked entry

        Returns
        ----------
        estimated : object
          Estimate relevant to the evaluation
        reference : object
          Reference relevant to the evaluation
        """

        # Unpack with the parent implementation first
        est, ref = super().pre_proc(estimated, reference)

        # Both sides arrive in tablature format - convert to stacked multi pitch
        est = tools.tablature_to_stacked_multi_pitch(est, self.profile)
        ref = tools.tablature_to_stacked_multi_pitch(ref, self.profile)

        return est, ref

    def evaluate(self, estimated, reference):
        """
        Evaluate a stacked multi pitch tablature estimate with respect to a reference.

        Parameters
        ----------
        estimated : ndarray (S x F x T)
          Array of multiple discrete pitch activation maps
          S - number of slices in stack
          F - number of discrete pitches
          T - number of frames
        reference : ndarray (S x F x T)
          Array of multiple discrete pitch activation maps
          Dimensions same as estimated

        Returns
        ----------
        results : dict
          Dictionary containing precision, recall, f-measure, and tdr
        """

        # Collapse pitch and degree-of-freedom dimensions on both sides
        est_flat = estimated.flatten()
        ref_flat = reference.flatten()

        # Total activation counts for estimate and ground-truth
        predicted_count = est_flat.sum(axis=-1)
        reference_count = ref_flat.sum(axis=-1)

        # Correct string/fret predictions are where both activations agree
        correct_tablature = (est_flat * ref_flat).sum(axis=-1)

        # Precision and recall (EPSILON guards against empty activations)
        precision = correct_tablature / (predicted_count + EPSILON)
        recall = correct_tablature / (reference_count + EPSILON)

        # F1 via the harmonic mean formula
        f_measure = hmean([precision + EPSILON, recall + EPSILON]) - EPSILON

        # Collapse the stack into plain multi pitch to measure disambiguation
        collapsed_est = tools.stacked_multi_pitch_to_multi_pitch(estimated).flatten()
        collapsed_ref = tools.stacked_multi_pitch_to_multi_pitch(reference).flatten()
        correct_multi_pitch = (collapsed_est * collapsed_ref).sum(axis=-1)

        # Tablature disambiguation rate - correct tablature
        # relative to correctly-detected pitch activity
        tdr = correct_tablature / (correct_multi_pitch + EPSILON)

        return {
            tools.KEY_PRECISION : precision,
            tools.KEY_RECALL : recall,
            tools.KEY_F1 : f_measure,
            tools.KEY_TDR : tdr
        }
class SoftmaxAccuracy(Evaluator):
"""
Implements an evaluator for calculating accuracy of softmax groups.
"""
    def __init__(self, key, save_dir=None, patterns=None, verbose=False):
        """
        Initialize parameters for the evaluator.

        Parameters
        ----------
        See Evaluator class for others...

        key : string
          Key to use when unpacking data and organizing results; required
          (no default) since softmax accuracy has no default key
        """

        super().__init__(key, save_dir, patterns, verbose)
@staticmethod
def get_default_key():
"""
A key must be provided for softmax groups accuracy.
"""
return NotImplementedError
def evaluate(self, estimated, reference):
"""
Evaluate class membership estimates with respect to a reference.
Parameters
----------
estimated : ndarray (S x T)
Array of class membership estimates for multiple degrees of freedom (e.g. strings)
S - number of degrees of freedom
T - number of samples or frames
reference : ndarray (S x F x T)
Array of class membership ground-truth
Dimensions same as estimated
Returns
----------
results : dict
Dictionary containing accuracy
"""
# Determine the number of correctly identified classes across all groups
num_correct = | np.sum(estimated == reference) | numpy.sum |
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ..simulation.properties import Pose
from ..log import PCG_ROOT_LOGGER
def circular(
radius,
max_theta=2 * np.pi,
step_theta=None,
step_radius=None,
n_theta=None,
n_radius=None,
pose_offset=[0, 0, 0, 0, 0, 0]):
poses = None
assert radius > 0, \
'Radius must be greater than zero, provided={}'.format(
radius)
assert max_theta >= 0 and max_theta <= 2 * np.pi, \
'max_theta must be greater than zero and smaller' \
' than 2 * pi, provided={}'.format(max_theta)
if step_theta is not None:
assert step_theta > 0, \
'n_theta must be greater than zero, provided={}'.format(
n_theta)
theta = np.arange(0, max_theta + step_theta, step_theta)
elif n_theta is not None:
assert n_theta > 0, \
'Number of angle samples must be greater than 0, ' \
'provided={}'.format(n_theta)
if max_theta == 2 * np.pi:
m = max_theta - max_theta / n_theta
else:
m = max_theta
theta = | np.linspace(0, m, n_theta) | numpy.linspace |
import unittest
import numpy as np
from parameterized import parameterized_class
from unittest import mock
from numpy.testing import assert_array_equal
from small_text.data.datasets import SklearnDataset, DatasetView
from small_text.data.datasets import split_data
from small_text.data.exceptions import UnsupportedOperationException
from small_text.data import balanced_sampling, stratified_sampling
from tests.utils.datasets import random_matrix_data
from tests.utils.testing import assert_array_not_equal
@parameterized_class([{'matrix_type': 'sparse', 'target_labels': 'explicit'},
{'matrix_type': 'sparse', 'target_labels': 'inferred'},
{'matrix_type': 'dense', 'target_labels': 'explicit'},
{'matrix_type': 'dense', 'target_labels': 'inferred'}])
class SklearnDatasetTest(unittest.TestCase):
NUM_SAMPLES = 100
def _dataset(self, num_samples=100, return_data=False):
x, y = random_matrix_data(self.matrix_type, num_samples=num_samples)
if self.target_labels not in ['explicit', 'inferred']:
raise ValueError('Invalid test parameter value for target_labels:' + self.target_labels)
target_labels = None if self.target_labels == 'inferred' else np.unique(y)
dataset = SklearnDataset(x, y, target_labels=target_labels)
if return_data:
return dataset, x, y
else:
return dataset
    def test_init_when_some_labels_are_none(self):
        # Construction must succeed even when part of the labels is None
        # (i.e. unlabeled samples).
        x, y = random_matrix_data(self.matrix_type, num_samples=self.NUM_SAMPLES)
        y = y.tolist()
        y[0:10] = [None] * 10
        y = np.array(y)
        if self.target_labels not in ['explicit', 'inferred']:
            raise ValueError('Invalid test parameter value for target_labels:' + self.target_labels)
        # NOTE(review): this branch looks inverted relative to _dataset(),
        # where 'inferred' maps to target_labels=None -- here 'inferred'
        # passes an explicit array (labels cannot be inferred once some are
        # None); confirm this is intentional.
        target_labels = np.array([0, 1]) if self.target_labels == 'inferred' else np.unique(y[10:])
        SklearnDataset(x, y, target_labels=target_labels)
def test_get_features(self):
ds, x, y = self._dataset(num_samples=self.NUM_SAMPLES, return_data=True)
self.assertIsNotNone(ds.y)
if self.matrix_type == 'dense':
assert_array_equal(x, ds.x)
else:
self.assertTrue((x != ds.x).nnz == 0)
def test_set_features(self):
ds, x, y = self._dataset(num_samples=self.NUM_SAMPLES, return_data=True)
ds_new = self._dataset(num_samples=self.NUM_SAMPLES)
self.assertIsNotNone(ds.y)
self.assertIsNotNone(ds_new.y)
if self.matrix_type == 'dense':
self.assertFalse((ds.x == ds_new.x).all())
else:
self.assertFalse((ds.x != ds_new.x).nnz == 0)
ds.x = ds_new.x
if self.matrix_type == 'dense':
self.assertTrue((ds.x == ds_new.x).all())
else:
self.assertTrue((ds.x != ds_new.x).nnz == 0)
def test_get_labels(self):
ds, _, y = self._dataset(num_samples=self.NUM_SAMPLES, return_data=True)
assert_array_equal(y, ds.y)
def test_set_labels(self):
ds, _, y = self._dataset(num_samples=self.NUM_SAMPLES, return_data=True)
ds_new, _, y_new = self._dataset(num_samples=self.NUM_SAMPLES, return_data=True)
self.assertFalse((y == y_new).all())
ds.y = ds_new.y
assert_array_equal(y_new, ds.y)
def test_get_target_labels(self):
ds = self._dataset(num_samples=self.NUM_SAMPLES)
expected_target_labels = np.array([0, 1])
assert_array_equal(expected_target_labels, ds.target_labels)
def test_set_target_labels(self):
ds = self._dataset(num_samples=self.NUM_SAMPLES)
expected_target_labels = np.array([0, 1])
assert_array_equal(expected_target_labels, ds.target_labels)
new_target_labels = np.array([2, 3])
ds.target_labels = new_target_labels
| assert_array_equal(new_target_labels, ds.target_labels) | numpy.testing.assert_array_equal |
"""
functions for image segmentation and splitting of training/test
dataset
"""
import time
import numpy as np
import matplotlib.colors as colors
import matplotlib as mpl
# my modules
import chmap.utilities.datatypes.datatypes as datatypes
# machine learning modules
import tensorflow as tf
import matplotlib.pyplot as plt
from IPython.display import clear_output
from sklearn.cluster import KMeans
from skimage import measure
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import *
def normalize(input_image):
    """
    Scale uint8-style pixel values into [0, 1] as float32.

    :param input_image: image tensor with values in [0, 255]
    :return: float32 tensor with values in [0, 1]
    """
    input_image = tf.cast(input_image, tf.float32) / 255.0
    # input_mask -= 1
    return input_image
def load_image_train(datapoint, size):
    """
    Prepare a training image: resize to ``size`` and randomly flip it
    horizontally (p = 0.5) for augmentation.

    :param datapoint: image tensor
    :param size: target (height, width) passed to tf.image.resize
    :return: the resized (and possibly flipped) image; note that
             normalization is currently commented out
    """
    input_image = tf.image.resize(datapoint, size)
    # input_image = tf.image.resize(datapoint, (128, 128))
    # if datapoint['segmentation_mask']:
    #     input_mask = tf.image.resize(datapoint['segmentation_mask'], size)
    #     # input_mask = tf.image.resize(segmentation_mask, (128, 128))
    if tf.random.uniform(()) > 0.5:
        input_image = tf.image.flip_left_right(input_image)
        # if input_mask:
        #     input_mask = tf.image.flip_left_right(input_mask)
    # input_image = normalize(input_image)
    return input_image
def load_image_val(datapoint):
    """
    Prepare a validation sample: resize image and mask to 128x128 and
    normalize the image into [0, 1].

    :param datapoint: dict with 'image' and 'segmentation_mask' entries
    :return: (input_image, input_mask) tuple
    """
    input_image = tf.image.resize(datapoint['image'], (128, 128))
    input_mask = tf.image.resize(datapoint['segmentation_mask'], (128, 128))
    # Bug fix: normalize() takes a single image argument; the original call
    # normalize(input_image, input_mask) raised a TypeError at runtime.
    input_image = normalize(input_image)
    return input_image, input_mask
def display_sample(display_list):
    """Plot up to three images (input / true mask / predicted mask) side by side."""
    titles = ['Input Image', 'True Mask', 'Predicted Mask']
    n_images = len(display_list)
    plt.figure(figsize=(15, 15))
    for idx, image in enumerate(display_list):
        plt.subplot(1, n_images, idx + 1)
        plt.title(titles[idx])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(image))
        plt.axis('off')
    plt.show()
def create_mask(pred_mask: tf.Tensor) -> tf.Tensor:
    """Collapse per-pixel class scores to a single-channel top-1 mask.

    Parameters
    ----------
    pred_mask : tf.Tensor
        A [IMG_SIZE, IMG_SIZE, N_CLASS] tensor of per-pixel class scores
        (e.g. [0.0, 0.0, 1.0] means class 2 with probability 100%).

    Returns
    -------
    tf.Tensor
        A [IMG_SIZE, IMG_SIZE, 1] mask holding the argmax class per pixel.
    """
    # argmax over the class axis: [H, W, C] -> [H, W]
    top1 = tf.argmax(pred_mask, axis=-1)
    # matplotlib wants an explicit channel axis: [H, W] -> [H, W, 1]
    return tf.expand_dims(top1, axis=-1)
def show_predictions(sample_image=None, sample_mask=None, dataset=None, model=None, num=1):
    """Show a sample prediction.

    Parameters
    ----------
    sample_image : image tensor batch, used when ``dataset`` is None
    sample_mask : ground-truth mask batch paired with ``sample_image``
    dataset : tf.data.Dataset of (image, mask) pairs, optional, by default None
    model : trained keras model used to produce the predictions
    num : int, optional
        Number of samples to show, by default 1
    """
    if dataset:
        for image, mask in dataset.take(num):
            pred_mask = model.predict(image)
            display_sample([image, mask, create_mask(pred_mask)])
    else:
        # The model is expecting a tensor of the size
        # [BATCH_SIZE, IMG_SIZE, IMG_SIZE, 3]
        # but sample_image[0] is [IMG_SIZE, IMG_SIZE, 3]
        # and we want only 1 inference to be faster
        # so we add an additional dimension [1, IMG_SIZE, IMG_SIZE, 3]
        one_img_batch = sample_image[0][tf.newaxis, ...]
        # one_img_batch -> [1, IMG_SIZE, IMG_SIZE, 3]
        inference = model.predict(one_img_batch)
        # inference -> [1, IMG_SIZE, IMG_SIZE, N_CLASS]
        pred_mask = create_mask(inference)
        # pred_mask -> [1, IMG_SIZE, IMG_SIZE, 1]
        display_sample([sample_image[0], sample_mask[0],
                        pred_mask[0]])
#### more advanced training
class DisplayCallback(tf.keras.callbacks.Callback):
    """Keras callback that clears the notebook output after every epoch."""

    def on_epoch_end(self, epoch, logs=None):
        clear_output(wait=True)
        # show_predictions()
        print('\nSample Prediction after epoch {}\n'.format(epoch + 1))
#### apply detection
def ml_chd(model, iit_list, los_list, use_indices, inst_list):
    """
    Run the ML coronal-hole-detection model on each instrument's IIT image.

    :param model: trained keras segmentation model
    :param iit_list: per-instrument IIT images (entries may be None)
    :param los_list: per-instrument line-of-sight images
    :param use_indices: per-instrument boolean masks of usable pixels
    :param inst_list: instrument names (parallel to the other lists)
    :return: list of CHDImage objects, one per instrument
    """
    start = time.time()
    # NOTE: the list repeats one placeholder instance; entries are replaced below
    chd_image_list = [datatypes.CHDImage()] * len(inst_list)
    for inst_ind, instrument in enumerate(inst_list):
        if iit_list[inst_ind] is not None:
            # define CHD parameters
            image_data = iit_list[inst_ind].iit_data
            use_chd = use_indices[inst_ind]
            # ML CHD
            # create correct data format
            # log-scaled color mapping; assumes the 'sohoeit195' colormap is
            # registered with matplotlib -- TODO confirm
            scalarMap = mpl.cm.ScalarMappable(norm=colors.LogNorm(vmin=1.0, vmax=np.max(image_data)),
                                              cmap='sohoeit195')
            colorVal = scalarMap.to_rgba(image_data, norm=True)
            data_x = colorVal[:, :, :3]
            # apply ml algorithm
            ml_output = model.predict(data_x[tf.newaxis, ...], verbose=1)
            # threshold the probability map at 0.1 to get a binary mask
            result = (ml_output[0] > 0.1).astype(np.uint8)
            # use_chd = np.logical_and(image_data != -9999, result.squeeze() > 0)
            # keep only predictions on usable pixels
            pred = np.zeros(shape=result.squeeze().shape)
            pred[use_chd] = result.squeeze()[use_chd]
            # pred = np.zeros(shape=ml_output.squeeze().shape)
            # pred[use_chd] = ml_output.squeeze()[use_chd]
            # chd_result = np.logical_and(pred == 1, use_chd == 1)
            # chd_result = chd_result.astype(int)
            # binary_result = np.logical_and(binary_output == 1, use_chd == 1)
            # binary_result = binary_result.astype(int)
            # create CHD image
            chd_image_list[inst_ind] = datatypes.create_chd_image(los_list[inst_ind], pred)
            chd_image_list[inst_ind].get_coordinates()
            # chd_binary_list[inst_ind] = datatypes.create_chd_image(los_list[inst_ind], binary_result)
            # chd_binary_list[inst_ind].get_coordinates()
    end = time.time()
    print("Coronal Hole Detection algorithm implemented in", end - start, "seconds.")
    return chd_image_list
def conv2d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    """Function to add 2 convolutional layers with the parameters passed to it.

    :param input_tensor: input feature map
    :param n_filters: number of filters for both convolutions
    :param kernel_size: side length of the (square) convolution kernel
    :param batchnorm: whether to insert BatchNormalization before each activation
    :return: output tensor of the second activation
    """
    # first layer
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size), kernel_initializer='he_normal',
               padding='same')(input_tensor)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    # second layer -- bug fix: this conv must consume ``x`` (the first layer's
    # output); the original applied it to ``input_tensor``, silently dropping
    # the first layer from the graph.
    x = Conv2D(filters=n_filters, kernel_size=(kernel_size, kernel_size),
               kernel_initializer='he_normal', padding='same')(x)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x
def get_unet(input_img, n_filters=16, dropout=0.1, batchnorm=True):
    """
    Build a standard 4-level U-Net on top of ``input_img``.

    :param input_img: keras Input tensor
    :param n_filters: base filter count (doubled at each contracting level)
    :param dropout: dropout rate applied after each pooling / upsampling stage
    :param batchnorm: whether conv blocks include batch normalization
    :return: keras Model with a single-channel sigmoid output
    """
    # Contracting Path
    c1 = conv2d_block(input_img, n_filters * 1, kernel_size=3, batchnorm=batchnorm)
    p1 = MaxPooling2D((2, 2))(c1)
    p1 = Dropout(dropout)(p1)
    c2 = conv2d_block(p1, n_filters * 2, kernel_size=3, batchnorm=batchnorm)
    p2 = MaxPooling2D((2, 2))(c2)
    p2 = Dropout(dropout)(p2)
    c3 = conv2d_block(p2, n_filters * 4, kernel_size=3, batchnorm=batchnorm)
    p3 = MaxPooling2D((2, 2))(c3)
    p3 = Dropout(dropout)(p3)
    c4 = conv2d_block(p3, n_filters * 8, kernel_size=3, batchnorm=batchnorm)
    p4 = MaxPooling2D((2, 2))(c4)
    p4 = Dropout(dropout)(p4)
    # bottleneck
    c5 = conv2d_block(p4, n_filters=n_filters * 16, kernel_size=3, batchnorm=batchnorm)
    # Expansive Path (skip connections concatenate the contracting features)
    u6 = Conv2DTranspose(n_filters * 8, (3, 3), strides=(2, 2), padding='same')(c5)
    u6 = concatenate([u6, c4])
    u6 = Dropout(dropout)(u6)
    c6 = conv2d_block(u6, n_filters * 8, kernel_size=3, batchnorm=batchnorm)
    u7 = Conv2DTranspose(n_filters * 4, (3, 3), strides=(2, 2), padding='same')(c6)
    u7 = concatenate([u7, c3])
    u7 = Dropout(dropout)(u7)
    c7 = conv2d_block(u7, n_filters * 4, kernel_size=3, batchnorm=batchnorm)
    u8 = Conv2DTranspose(n_filters * 2, (3, 3), strides=(2, 2), padding='same')(c7)
    u8 = concatenate([u8, c2])
    u8 = Dropout(dropout)(u8)
    c8 = conv2d_block(u8, n_filters * 2, kernel_size=3, batchnorm=batchnorm)
    u9 = Conv2DTranspose(n_filters * 1, (3, 3), strides=(2, 2), padding='same')(c8)
    u9 = concatenate([u9, c1])
    u9 = Dropout(dropout)(u9)
    c9 = conv2d_block(u9, n_filters * 1, kernel_size=3, batchnorm=batchnorm)
    outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model
def load_model(model_h5, IMG_SIZE=2048, N_CHANNELS=3):
    """
    function to load keras model from hdf5 file

    NOTE(review): this function shadows ``tensorflow.keras.models.load_model``
    imported at the top of the file -- confirm that is intentional.

    :param model_h5: path to the hdf5 weights file
    :param IMG_SIZE: square input image size the network was built for
    :param N_CHANNELS: number of input channels
    :return: compiled keras model with weights restored
    """
    input_img = Input((IMG_SIZE, IMG_SIZE, N_CHANNELS), name='img')
    model = get_unet(input_img, n_filters=16, dropout=0.05, batchnorm=True)
    model.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=["accuracy"])
    model.load_weights(model_h5)
    return model
def cluster_brightness(clustered_img, org_img, n_clusters):
    """Return the mean original-image value for each cluster label.

    :param clustered_img: per-pixel cluster labels in [0, n_clusters)
    :param org_img: original image the clustering was computed from
                    (same spatial shape as ``clustered_img``)
    :param n_clusters: number of clusters to report on
    :return: list with one average value per cluster label, in label order
    """
    averages = []
    for label in range(n_clusters):
        # pixels belonging to this cluster
        members = np.where(clustered_img == label)
        # mean of the original-image values over those pixels
        averages.append(np.average(org_img[members], axis=0))
    return averages
def kmeans_detection(org_map, use_data, arr, N_CLUSTERS, IMG_HEIGHT, IMG_WIDTH, map_x, map_y):
optimalk = KMeans(n_clusters=N_CLUSTERS, random_state=0, init='k-means++').fit(arr)
labels = optimalk.labels_
pred_clustered = labels.reshape(IMG_HEIGHT, IMG_WIDTH)
# get cluster brightnesses
avg_color = cluster_brightness(pred_clustered, use_data, N_CLUSTERS)
color_order = np.argsort(avg_color)
### CH Detection
chd_clustered = pred_clustered + 1
chd_clustered = np.where(np.logical_or(chd_clustered == color_order[0] + 1, chd_clustered == color_order[1] + 1), N_CLUSTERS + 1, 0)
chd_clustered = np.where(chd_clustered == N_CLUSTERS + 1, 1, 0)
# area constraint
chd_labeled = measure.label(chd_clustered, connectivity=2, background=0, return_num=True)
# get area
chd_area = [props.area for props in measure.regionprops(chd_labeled[0])]
# remove CH with less than 10 pixels in area
chd_good_area = np.where(np.array(chd_area) > 25)
indices = []
chd_plot = np.zeros(chd_labeled[0].shape)
for val in chd_good_area[0]:
val_label = val + 1
indices.append(np.logical_and(chd_labeled[0] == val_label, val in chd_good_area[0]))
for idx in indices:
chd_plot[idx] = chd_labeled[0][idx] + 1
#### ACTIVE REGION DETECTION
# get cluster brightness
ar_clustered = pred_clustered + 1
ar_clustered = np.where(ar_clustered == color_order[-1] + 1, N_CLUSTERS + 1, 0)
ar_clustered = | np.where(ar_clustered == N_CLUSTERS + 1, 1, 0) | numpy.where |
"""
Tests for the generic MLEModel
Author: <NAME>
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.tsa.statespace import (sarimax, varmax, kalman_filter,
kalman_smoother)
from statsmodels.tsa.statespace.mlemodel import MLEModel, MLEResultsWrapper
from statsmodels.tsa.statespace.tools import compatibility_mode
from statsmodels.datasets import nile
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
from statsmodels.tsa.statespace.tests.results import results_sarimax, results_var_misc
current_path = os.path.dirname(os.path.abspath(__file__))
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
# Basic kwargs
# Minimal univariate state-space specification reused throughout these tests:
# one state, identity design/transition/selection, unit state variance, and
# an approximate-diffuse initialization.
kwargs = {
    'k_states': 1, 'design': [[1]], 'transition': [[1]],
    'selection': [[1]], 'state_cov': [[1]],
    'initialization': 'approximate_diffuse'
}
def get_dummy_mod(fit=True, pandas=False):
    """Build (and optionally fit) a trivially perfect TVP-regression SARIMAX.

    The regressor is an exact multiple of the endog, so the time-varying
    parameter regression has a perfect fit even though the parameters are
    not actually time-varying.
    """
    nobs = 100
    endog = np.arange(nobs) * 1.0
    exog = 2 * endog
    if pandas:
        index = pd.date_range('1960-01-01', periods=nobs, freq='MS')
        endog = pd.Series(endog, index=index)
        exog = pd.Series(exog, index=index)
    mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
    res = None
    if fit:
        # fitting this degenerate model emits warnings; silence them
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            res = mod.fit(disp=-1)
    return mod, res
def test_wrapping():
    """Exercise the MLEModel wrappers around Representation / KalmanFilter /
    KalmanSmoother: get/set of system matrices, scalar properties,
    initialization helpers, and the filter configuration setters."""
    # Test the wrapping of various Representation / KalmanFilter /
    # KalmanSmoother methods / attributes
    mod, _ = get_dummy_mod(fit=False)
    # Test that we can get the design matrix
    assert_equal(mod['design', 0, 0], 2.0 * np.arange(100))
    # Test that we can set individual elements of the design matrix
    mod['design', 0, 0, :] = 2
    assert_equal(mod.ssm['design', 0, 0, :], 2)
    assert_equal(mod.ssm['design'].shape, (1, 1, 100))
    # Test that we can set the entire design matrix
    mod['design'] = [[3.]]
    assert_equal(mod.ssm['design', 0, 0], 3.)
    # (Now it's no longer time-varying, so only 2-dim)
    assert_equal(mod.ssm['design'].shape, (1, 1))
    # Test that we can change the following properties: loglikelihood_burn,
    # initial_variance, tolerance
    assert_equal(mod.loglikelihood_burn, 1)
    mod.loglikelihood_burn = 0
    assert_equal(mod.ssm.loglikelihood_burn, 0)
    assert_equal(mod.tolerance, mod.ssm.tolerance)
    mod.tolerance = 0.123
    assert_equal(mod.ssm.tolerance, 0.123)
    assert_equal(mod.initial_variance, 1e10)
    mod.initial_variance = 1e12
    assert_equal(mod.ssm.initial_variance, 1e12)
    # Test that we can use the following wrappers: initialization,
    # initialize_known, initialize_stationary, initialize_approximate_diffuse
    # Initialization starts off as none
    assert_equal(mod.initialization, None)
    # Since the SARIMAX model may be fully stationary or may have diffuse
    # elements, it uses a custom initialization by default, but it can be
    # overridden by users
    mod.initialize_state()
    # (The default initialization in this case is known because there is a non-
    # stationary state corresponding to the time-varying regression parameter)
    assert_equal(mod.initialization, 'known')
    mod.initialize_approximate_diffuse(1e5)
    assert_equal(mod.initialization, 'approximate_diffuse')
    assert_equal(mod.ssm._initial_variance, 1e5)
    mod.initialize_known([5.], [[40]])
    assert_equal(mod.initialization, 'known')
    assert_equal(mod.ssm._initial_state, [5.])
    assert_equal(mod.ssm._initial_state_cov, [[40]])
    mod.initialize_stationary()
    assert_equal(mod.initialization, 'stationary')
    # Test that we can use the following wrapper methods: set_filter_method,
    # set_stability_method, set_conserve_memory, set_smoother_output
    # The defaults are as follows:
    assert_equal(mod.ssm.filter_method, kalman_filter.FILTER_CONVENTIONAL)
    assert_equal(mod.ssm.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
    assert_equal(mod.ssm.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
    assert_equal(mod.ssm.smoother_output, kalman_smoother.SMOOTHER_ALL)
    # Now, create the Cython filter object and assert that they have
    # transferred correctly
    mod.ssm._initialize_filter()
    kf = mod.ssm._kalman_filter
    assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
    assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
    assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
    # (the smoother object is so far not in Cython, so there is no
    # transferring)
    # Change the attributes in the model class
    if compatibility_mode:
        assert_raises(NotImplementedError, mod.set_filter_method, 100)
    else:
        mod.set_filter_method(100)
    mod.set_stability_method(101)
    mod.set_conserve_memory(102)
    mod.set_smoother_output(103)
    # Assert that the changes have occurred in the ssm class
    if not compatibility_mode:
        assert_equal(mod.ssm.filter_method, 100)
    assert_equal(mod.ssm.stability_method, 101)
    assert_equal(mod.ssm.conserve_memory, 102)
    assert_equal(mod.ssm.smoother_output, 103)
    # Assert that the changes have *not yet* occurred in the filter object
    assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
    assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
    assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
    # Re-initialize the filter object (this would happen automatically anytime
    # loglike, filter, etc. were called)
    # In this case, an error will be raised since filter_method=100 is not
    # valid
    # Note: this error is only raised in the compatibility case, since the
    # newer filter logic checks for a valid filter mode at a different point
    if compatibility_mode:
        assert_raises(NotImplementedError, mod.ssm._initialize_filter)
    # Now, test the setting of the other two methods by resetting the
    # filter method to a valid value
    mod.set_filter_method(1)
    mod.ssm._initialize_filter()
    # Retrieve the new kalman filter object (a new object had to be created
    # due to the changing filter method)
    kf = mod.ssm._kalman_filter
    assert_equal(kf.filter_method, 1)
    assert_equal(kf.stability_method, 101)
    assert_equal(kf.conserve_memory, 102)
def test_fit_misc():
    """Miscellaneous fit() options: different ``optim_hessian`` variants must
    reach (roughly) the same optimum, and ``return_params=True`` must return
    the parameter vector directly."""
    true = results_sarimax.wpi1_stationary
    endog = np.diff(true['data'])[1:]
    mod = sarimax.SARIMAX(endog, order=(1,0,1), trend='c')
    # Test optim_hessian={'opg','oim','approx'}
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res1 = mod.fit(method='ncg', disp=0, optim_hessian='opg', optim_complex_step=False)
        res2 = mod.fit(method='ncg', disp=0, optim_hessian='oim', optim_complex_step=False)
    # Check that the Hessians broadly result in the same optimum
    assert_allclose(res1.llf, res2.llf, rtol=1e-2)
    # Test return_params=True
    mod, _ = get_dummy_mod(fit=False)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res_params = mod.fit(disp=-1, return_params=True)
    # 5 digits necessary to accommodate 32-bit numpy / scipy with OpenBLAS 0.2.18
    assert_almost_equal(res_params, [0, 0], 5)
def test_score_misc():
    """Smoke-test that the score can be evaluated at the fitted parameters."""
    mod, res = get_dummy_mod()
    # Test that the score function works
    mod.score(res.params)
def test_from_formula():
    """``MLEModel.from_formula`` is explicitly unsupported."""
    # pass the callable and its arguments directly instead of a lambda
    assert_raises(NotImplementedError, MLEModel.from_formula, 1, 2, 3)
def test_score_analytic_ar1():
    """Check numerical score approximations against the analytic score of an
    AR(1) model with two observations, for both transformed and untransformed
    parameters, and sanity-check the Hessian approximations.

    Fixes a copy/paste bug in the original: the centered finite-difference
    scores were computed but never asserted (the non-centered result was
    asserted twice instead).
    """
    # Let endog = [1, 0.5], params=[0, 1]
    mod = sarimax.SARIMAX([1, 0.5], order=(1,0,0))
    def partial_phi(phi, sigma2):
        return -0.5 * (phi**2 + 2*phi*sigma2 - 1) / (sigma2 * (1 - phi**2))
    def partial_sigma2(phi, sigma2):
        return -0.5 * (2*sigma2 + phi - 1.25) / (sigma2**2)
    params = np.r_[0., 2]
    # Compute the analytic score
    analytic_score = np.r_[
        partial_phi(params[0], params[1]),
        partial_sigma2(params[0], params[1])]
    # Check each of the approximations, transformed parameters
    approx_cs = mod.score(params, transformed=True, approx_complex_step=True)
    assert_allclose(approx_cs, analytic_score)
    approx_fd = mod.score(params, transformed=True, approx_complex_step=False)
    assert_allclose(approx_fd, analytic_score, atol=1e-5)
    approx_fd_centered = (
        mod.score(params, transformed=True, approx_complex_step=False,
                  approx_centered=True))
    # Bug fix: assert the *centered* finite-difference score (the original
    # re-asserted approx_fd here, leaving approx_fd_centered unchecked).
    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
    harvey_cs = mod.score(params, transformed=True, method='harvey',
                          approx_complex_step=True)
    assert_allclose(harvey_cs, analytic_score)
    harvey_fd = mod.score(params, transformed=True, method='harvey',
                          approx_complex_step=False)
    assert_allclose(harvey_fd, analytic_score, atol=1e-5)
    harvey_fd_centered = mod.score(params, transformed=True, method='harvey',
                                   approx_complex_step=False,
                                   approx_centered=True)
    assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5)
    # Check the approximations for untransformed parameters. The analytic
    # check now comes from chain rule with the analytic derivative of the
    # transformation
    # if L* is the likelihood evaluated at untransformed parameters and
    # L is the likelihood evaluated at transformed parameters, then we have:
    # L*(u) = L(t(u))
    # and then
    # L'*(u) = L'(t(u)) * t'(u)
    def partial_transform_phi(phi):
        return -1. / (1 + phi**2)**(3./2)
    def partial_transform_sigma2(sigma2):
        return 2. * sigma2
    uparams = mod.untransform_params(params)
    analytic_score = np.dot(
        np.diag(np.r_[partial_transform_phi(uparams[0]),
                      partial_transform_sigma2(uparams[1])]),
        np.r_[partial_phi(params[0], params[1]),
              partial_sigma2(params[0], params[1])])
    approx_cs = mod.score(uparams, transformed=False, approx_complex_step=True)
    assert_allclose(approx_cs, analytic_score)
    approx_fd = mod.score(uparams, transformed=False,
                          approx_complex_step=False)
    assert_allclose(approx_fd, analytic_score, atol=1e-5)
    approx_fd_centered = (
        mod.score(uparams, transformed=False, approx_complex_step=False,
                  approx_centered=True))
    # Bug fix: same as above -- check the centered variant.
    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
    harvey_cs = mod.score(uparams, transformed=False, method='harvey',
                          approx_complex_step=True)
    assert_allclose(harvey_cs, analytic_score)
    harvey_fd = mod.score(uparams, transformed=False, method='harvey',
                          approx_complex_step=False)
    assert_allclose(harvey_fd, analytic_score, atol=1e-5)
    harvey_fd_centered = mod.score(uparams, transformed=False, method='harvey',
                                   approx_complex_step=False,
                                   approx_centered=True)
    assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5)
    # Check the Hessian: these approximations are not very good, particularly
    # when phi is close to 0
    params = np.r_[0.5, 1.]
    def hessian(phi, sigma2):
        hessian = np.zeros((2,2))
        hessian[0,0] = (-phi**2 - 1) / (phi**2 - 1)**2
        hessian[1,0] = hessian[0,1] = -1 / (2 * sigma2**2)
        hessian[1,1] = (sigma2 + phi - 1.25) / sigma2**3
        return hessian
    analytic_hessian = hessian(params[0], params[1])
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        assert_allclose(mod._hessian_complex_step(params) * 2,
                        analytic_hessian, atol=1e-1)
        assert_allclose(mod._hessian_finite_difference(params) * 2,
                        analytic_hessian, atol=1e-1)
def test_cov_params():
    """Smoke-test every supported ``cov_type`` and verify the human-readable
    description each one reports; an invalid type must raise."""
    mod, res = get_dummy_mod()
    # Smoke test for each of the covariance types
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res = mod.fit(res.params, disp=-1, cov_type='none')
        assert_equal(res.cov_kwds['description'], 'Covariance matrix not calculated.')
        res = mod.fit(res.params, disp=-1, cov_type='approx')
        assert_equal(res.cov_type, 'approx')
        assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using numerical (complex-step) differentiation.')
        res = mod.fit(res.params, disp=-1, cov_type='oim')
        assert_equal(res.cov_type, 'oim')
        assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the observed information matrix (complex-step) described in Harvey (1989).')
        res = mod.fit(res.params, disp=-1, cov_type='opg')
        assert_equal(res.cov_type, 'opg')
        assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the outer product of gradients (complex-step).')
        res = mod.fit(res.params, disp=-1, cov_type='robust')
        assert_equal(res.cov_type, 'robust')
        assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
        res = mod.fit(res.params, disp=-1, cov_type='robust_oim')
        assert_equal(res.cov_type, 'robust_oim')
        assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
        res = mod.fit(res.params, disp=-1, cov_type='robust_approx')
        assert_equal(res.cov_type, 'robust_approx')
        assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using numerical (complex-step) differentiation.')
        assert_raises(NotImplementedError, mod.fit, res.params, disp=-1, cov_type='invalid_cov_type')
def test_transform():
    """Parameter transformation wrappers: a no-op for plain MLEModel, and a
    square/sqrt pair for the dummy SARIMAX (variance) parameters."""
    # The transforms in MLEModel are noops
    mod = MLEModel([1,2], **kwargs)
    # Test direct transform, untransform
    assert_allclose(mod.transform_params([2, 3]), [2, 3])
    assert_allclose(mod.untransform_params([2, 3]), [2, 3])
    # Smoke test for transformation in `filter`, `update`, `loglike`,
    # `loglikeobs`
    mod.filter([], transformed=False)
    mod.update([], transformed=False)
    mod.loglike([], transformed=False)
    mod.loglikeobs([], transformed=False)
    # Note that mod is an SARIMAX instance, and the two parameters are
    # variances
    mod, _ = get_dummy_mod(fit=False)
    # Test direct transform, untransform
    assert_allclose(mod.transform_params([2, 3]), [4, 9])
    assert_allclose(mod.untransform_params([4, 9]), [2, 3])
    # Test transformation in `filter`
    res = mod.filter([2, 3], transformed=True)
    assert_allclose(res.params, [2, 3])
    res = mod.filter([2, 3], transformed=False)
    assert_allclose(res.params, [4, 9])
def test_filter():
    """``filter`` returns either raw FilterResults or a wrapped results object."""
    mod = MLEModel(np.array([1., 2.]), **kwargs)
    # return_ssm=True yields the low-level state-space filter results
    res = mod.filter([], return_ssm=True)
    assert_equal(isinstance(res, kalman_filter.FilterResults), True)
    # default: a full (wrapped) results object with the 'opg' covariance type
    res = mod.filter([])
    assert_equal(isinstance(res, MLEResultsWrapper), True)
    assert_equal(res.cov_type, 'opg')
    # the covariance type can be chosen explicitly
    res = mod.filter([], cov_type='oim')
    assert_equal(isinstance(res, MLEResultsWrapper), True)
    assert_equal(res.cov_type, 'oim')
def test_params():
    """``start_params`` raises until backed by an attribute; ``param_names``
    defaults to an empty list."""
    mod = MLEModel([1, 2], **kwargs)
    # By default, accessing start_params raises NotImplementedError...
    assert_raises(NotImplementedError, lambda: mod.start_params)
    # ...while param_names is simply empty
    assert_equal(mod.param_names, [])
    # Both can be backed by the private attributes
    mod._start_params = [1]
    mod._param_names = ['a']
    assert_equal(mod.start_params, [1])
    assert_equal(mod.param_names, ['a'])
def check_results(pandas):
    """Shared assertions for the dummy TVP-regression model results."""
    mod, res = get_dummy_mod(pandas=pandas)
    # With a perfect regression fit, the fitted values reproduce the data
    # exactly (after the burn-in period)...
    assert_almost_equal(res.fittedvalues[2:], mod.endog[2:].squeeze())
    # ...and the residuals vanish.
    assert_almost_equal(res.resid[2:], np.zeros(mod.nobs - 2))
    # Test loglikelihood_burn
    assert_equal(res.loglikelihood_burn, 1)
def test_results(pandas=False):
    """Run the shared result checks for both the numpy and pandas paths.

    (The ``pandas`` argument is unused; both variants are always exercised.
    It is kept for signature compatibility.)
    """
    for use_pandas in (False, True):
        check_results(pandas=use_pandas)
def test_predict():
    """predict() wrappers: full-sample default prediction, a date-string
    ``dynamic`` argument, and KeyError when a string is used without dates."""
    dates = pd.date_range(start='1980-01-01', end='1981-01-01', freq='AS')
    endog = pd.Series([1,2], index=dates)
    mod = MLEModel(endog, **kwargs)
    res = mod.filter([])
    # Test that predict with start=None, end=None does prediction with full
    # dataset
    predict = res.predict()
    assert_equal(predict.shape, (mod.nobs,))
    assert_allclose(res.get_prediction().predicted_mean, predict)
    # Test a string value to the dynamic option
    assert_allclose(res.predict(dynamic='1981-01-01'), res.predict())
    # Test an invalid date string value to the dynamic option
    # assert_raises(ValueError, res.predict, dynamic='1982-01-01')
    # Test for passing a string to predict when dates are not set
    mod = MLEModel([1,2], **kwargs)
    res = mod.filter([])
    assert_raises(KeyError, res.predict, dynamic='string')
def test_forecast():
    """forecast() repeats the last level for this identity model, for both
    integer step counts and (with a date index) date-string horizons."""
    # Numpy
    mod = MLEModel([1,2], **kwargs)
    res = mod.filter([])
    forecast = res.forecast(steps=10)
    assert_allclose(forecast, np.ones((10,)) * 2)
    assert_allclose(res.get_forecast(steps=10).predicted_mean, forecast)
    # Pandas
    index = pd.date_range('1960-01-01', periods=2, freq='MS')
    mod = MLEModel(pd.Series([1,2], index=index), **kwargs)
    res = mod.filter([])
    assert_allclose(res.forecast(steps=10), np.ones((10,)) * 2)
    assert_allclose(res.forecast(steps='1960-12-01'), np.ones((10,)) * 2)
    assert_allclose(res.get_forecast(steps=10).predicted_mean, np.ones((10,)) * 2)
def test_summary():
    """summary() must report the sample dates and default model name, and
    must not crash when the diagnostic statistics cannot be computed."""
    dates = pd.date_range(start='1980-01-01', end='1984-01-01', freq='AS')
    endog = pd.Series([1, 2, 3, 4, 5], index=dates)
    mod = MLEModel(endog, **kwargs)
    res = mod.filter([])
    # Get the summary
    txt = str(res.summary())
    # Test res.summary when the model has dates
    # (fix: use raw strings -- '\s' in a normal literal is an invalid escape
    # sequence; the pattern value itself is unchanged)
    assert_equal(re.search(r'Sample:\s+01-01-1980', txt) is not None, True)
    assert_equal(re.search(r'\s+- 01-01-1984', txt) is not None, True)
    # Test res.summary when `model_name` was not provided
    assert_equal(re.search(r'Model:\s+MLEModel', txt) is not None, True)
    # Smoke test that summary still works when diagnostic tests fail
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        res.filter_results._standardized_forecasts_error[:] = np.nan
        res.summary()
        res.filter_results._standardized_forecasts_error = 1
        res.summary()
        res.filter_results._standardized_forecasts_error = 'a'
        res.summary()
def check_endog(endog, nobs=2, k_endog=1, **kwargs):
    """Construct an MLEModel from ``endog`` and verify the shape/layout
    invariants of both the Statsmodels view and the state-space view of the
    data; returns the constructed model."""
    # create the model
    mod = MLEModel(endog, **kwargs)
    # the data directly available in the model is the Statsmodels version of
    # the data; it should be 2-dim, C-contiguous, long-shaped:
    # (nobs, k_endog) == (2, 1)
    assert_equal(mod.endog.ndim, 2)
    assert_equal(mod.endog.flags['C_CONTIGUOUS'], True)
    assert_equal(mod.endog.shape, (nobs, k_endog))
    # the data in the `ssm` object is the state space version of the data; it
    # should be 2-dim, F-contiguous, wide-shaped (k_endog, nobs) == (1, 2)
    # and it should share data with mod.endog
    assert_equal(mod.ssm.endog.ndim, 2)
    assert_equal(mod.ssm.endog.flags['F_CONTIGUOUS'], True)
    assert_equal(mod.ssm.endog.shape, (k_endog, nobs))
    assert_equal(mod.ssm.endog.base is mod.endog, True)
    return mod
def test_basic_endog():
    """Test various types of basic python endog inputs (lists, scalars...)."""
    # Non-array-like inputs fail due to checks in the Statsmodels base classes
    for bad in (1, 'a', True):
        assert_raises(ValueError, MLEModel, endog=bad, k_states=1)

    # Single observations of several scalar dtypes all coerce to [[1]]
    for single in ([1], [1.], [True]):
        res = MLEModel(single, **kwargs).filter([])
        assert_equal(res.filter_results.endog, [[1]])

    # A string element cannot be coerced to numeric; the failure surfaces
    # at filtering time rather than construction time
    mod = MLEModel(['a'], **kwargs)
    assert_raises(ValueError, mod.filter, [])

    # Different iterable types all give the expected result
    for endog in ([1., 2.], [[1.], [2.]], (1., 2.)):
        mod = check_endog(endog, **kwargs)
        mod.filter([])
def test_numpy_endog():
# Test various types of numpy endog inputs
# Check behavior of the link maintained between passed `endog` and
# `mod.endog` arrays
endog = np.array([1., 2.])
mod = MLEModel(endog, **kwargs)
assert_equal(mod.endog.base is not mod.data.orig_endog, True)
assert_equal(mod.endog.base is not endog, True)
assert_equal(mod.data.orig_endog.base is not endog, True)
endog[0] = 2
# there is no link to mod.endog
assert_equal(mod.endog, np.r_[1, 2].reshape(2,1))
# there remains a link to mod.data.orig_endog
assert_equal(mod.data.orig_endog, endog)
# Check behavior with different memory layouts / shapes
# Example (failure): 0-dim array
endog = np.array(1.)
# raises error due to len(endog) failing in Statsmodels base classes
assert_raises(TypeError, check_endog, endog, **kwargs)
# Example : 1-dim array, both C- and F-contiguous, length 2
endog = np.array([1.,2.])
assert_equal(endog.ndim, 1)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
assert_equal(endog.flags['F_CONTIGUOUS'], True)
assert_equal(endog.shape, (2,))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, C-contiguous, long-shaped: (nobs, k_endog)
endog = np.array([1., 2.]).reshape(2, 1)
assert_equal(endog.ndim, 2)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
# On newer numpy (>= 0.10), this array is (rightly) both C and F contiguous
# assert_equal(endog.flags['F_CONTIGUOUS'], False)
assert_equal(endog.shape, (2, 1))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, C-contiguous, wide-shaped: (k_endog, nobs)
endog = np.array([1., 2.]).reshape(1, 2)
| assert_equal(endog.ndim, 2) | numpy.testing.assert_equal |
"""Subdivided icosahedral mesh generation"""
from __future__ import print_function
import numpy as np
# following: http://blog.andreaskahler.com/2009/06/creating-icosphere-mesh-in-code.html
# hierarchy:
# Icosphere -> Triangle -> Point
class IcoSphere:
    """
    Usage: IcoSphere(level)
    Maximum supported level = 8

    get started with:
    >>> A = IcoSphere(3)
    ... A.plot3d()
    """

    # maximum level for subdivision of the icosahedron
    maxlevel = 8

    def __init__(self, level):
        """Build the subdivided icosahedral mesh.

        Parameters
        ----------
        level : int
            Number of subdivision passes (0 gives the raw icosahedron).

        Raises
        ------
        TypeError
            If `level` is not an integer.
        Exception
            If `level` is negative, exceeds `maxlevel`, or the resulting
            point/face counts disagree with the closed-form expectations.
        """
        if type(level) is not int:
            raise TypeError('level must be an integer')
        elif level < 0:
            raise Exception('level must be no less than 0')
        elif level > self.maxlevel:
            raise Exception('level larger than ' + str(self.maxlevel) + ' not supported')
        self.level = level
        self.points = []
        self.triangles = []
        self.npts = 0

        ################################
        # initialise level 1 icosahedron
        ################################

        # golden ratio
        t = (1.0 + np.sqrt(5.0)) / 2.0

        # add the 12 icosahedron vertices (normalised onto the unit
        # sphere inside Point)
        self._addPoint(np.array([-1, t, 0]))
        self._addPoint(np.array([ 1, t, 0]))
        self._addPoint(np.array([-1,-t, 0]))
        self._addPoint(np.array([ 1,-t, 0]))
        self._addPoint(np.array([ 0,-1, t]))
        self._addPoint(np.array([ 0, 1, t]))
        self._addPoint(np.array([ 0,-1,-t]))
        self._addPoint(np.array([ 0, 1,-t]))
        self._addPoint(np.array([ t, 0,-1]))
        self._addPoint(np.array([ t, 0, 1]))
        self._addPoint(np.array([-t, 0,-1]))
        self._addPoint(np.array([-t, 0, 1]))

        # make the 20 triangular faces
        tris = self.triangles
        verts = self.points
        # 5 faces around point 0
        tris.append(Triangle([ verts[0],verts[11], verts[5]]))
        tris.append(Triangle([ verts[0], verts[5], verts[1]]))
        tris.append(Triangle([ verts[0], verts[1], verts[7]]))
        tris.append(Triangle([ verts[0], verts[7],verts[10]]))
        tris.append(Triangle([ verts[0],verts[10],verts[11]]))
        # 5 adjacent faces
        tris.append(Triangle([ verts[1], verts[5], verts[9]]))
        tris.append(Triangle([ verts[5],verts[11], verts[4]]))
        tris.append(Triangle([verts[11],verts[10], verts[2]]))
        tris.append(Triangle([verts[10], verts[7], verts[6]]))
        tris.append(Triangle([ verts[7], verts[1], verts[8]]))
        # 5 faces around point 3
        tris.append(Triangle([ verts[3], verts[9], verts[4]]))
        tris.append(Triangle([ verts[3], verts[4], verts[2]]))
        tris.append(Triangle([ verts[3], verts[2], verts[6]]))
        tris.append(Triangle([ verts[3], verts[6], verts[8]]))
        tris.append(Triangle([ verts[3], verts[8], verts[9]]))
        # 5 adjacent faces
        tris.append(Triangle([ verts[4], verts[9], verts[5]]))
        tris.append(Triangle([ verts[2], verts[4],verts[11]]))
        tris.append(Triangle([ verts[6], verts[2],verts[10]]))
        tris.append(Triangle([ verts[8], verts[6], verts[7]]))
        tris.append(Triangle([ verts[9], verts[8], verts[1]]))

        ########################################
        # refine triangles to desired mesh level
        ########################################
        for _ in range(self.level):
            # midpoint cache, keyed by edge (sorted vertex-index pair), so
            # shared edges are split only once per pass
            midPointDict = {}
            faces = []
            for tri in self.triangles:
                # replace each triangle by 4 smaller triangles
                p = tri.pts
                a = self._getMiddlePoint(p[0], p[1], midPointDict)
                b = self._getMiddlePoint(p[1], p[2], midPointDict)
                c = self._getMiddlePoint(p[2], p[0], midPointDict)
                faces.append(Triangle([p[0], a, c]))
                faces.append(Triangle([p[1], b, a]))
                faces.append(Triangle([p[2], c, b]))
                faces.append(Triangle([a, b, c]))
            # once looped thru all triangles overwrite self.triangles
            self.triangles = faces

        self.nfaces = len(self.triangles)

        # sanity check: npts and nfaces must match the closed-form counts
        # (use the precomputed values instead of recomputing them)
        expected_npts = calculate_npts(self.level)
        expected_nfaces = calculate_nfaces(self.level)
        if self.npts != expected_npts:
            raise Exception('npts '+str(self.npts)+' not as expected '+str(expected_npts))
        elif self.nfaces != expected_nfaces:
            raise Exception('nfaces '+str(self.nfaces)+' not as expected '+str(expected_nfaces))

    def _addPoint(self, xyz):
        """Append a new Point for `xyz` and advance the running index."""
        self.points.append(Point(self.npts, xyz))
        self.npts += 1

    def _getMiddlePoint(self, p1, p2, midPointDict):
        """Return the midpoint Point of edge p1-p2, creating and caching it
        in `midPointDict` if this edge has not been split yet."""
        if not isinstance(p1, Point) or not isinstance(p2, Point):
            raise TypeError('p1 and p2 must be Points')
        # edge key is order-independent so (p1,p2) and (p2,p1) match
        key = tuple(sorted([p1.idx, p2.idx]))
        if key not in midPointDict:
            # point is new: create it and remember it for the sibling triangle
            self._addPoint((p1.xyz + p2.xyz)/2)
            midPointDict[key] = self.points[-1]
        return midPointDict[key]

    def plot3d(self):
        """Matplotlib 3D plot of mesh"""
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import Axes3D
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        xyz = np.asarray([ pt.xyz for pt in self.points ])
        x = xyz[:,0]
        y = xyz[:,1]
        z = xyz[:,2]
        ts = np.asarray([ [ p.idx for p in t.pts ] for t in self.triangles ])
        # pass the connectivity explicitly; the original positional form
        # plot_trisurf(x, y, ts, z) relied on matplotlib auto-detecting
        # `ts` as the triangle array
        ax.plot_trisurf(x, y, z, triangles=ts)
        plt.show()

    def dump_xyz(self):
        """Print the cartesian coordinates of every mesh point."""
        for pt in self.points:
            print(*pt.xyz)

    def dump_latlonr(self):
        """Print latitude / longitude / radius of every mesh point."""
        for pt in self.points:
            print(*cart2geo(*pt.xyz))
class Triangle:
    """A triangle adjoining three adjacent points"""
    def __init__(self, pts):
        # Validate with guard clauses, then store the vertex list as-is.
        if not isinstance(pts, list):
            raise TypeError('pts must be a list')
        if len(pts) != 3:
            raise Exception('pts must be of length 3')
        self.pts = pts
class Point:
    """A 3D point on the mesh, stored normalised onto the unit sphere."""
    def __init__(self, idx, xyz):
        # Guard clauses; the exact-type check deliberately rejects bool.
        if type(idx) is not int:
            raise TypeError('idx must be an integer')
        if not isinstance(xyz, np.ndarray):
            raise TypeError('xyz must be a numpy array')
        if xyz.size != 3:
            raise Exception('xyz must be of size 3')
        # project onto the unit sphere and record this point's index
        self.idx = idx
        self.xyz = xyz / np.linalg.norm(xyz)
def calculate_npts(level):
    """Number of vertices in an icosphere subdivided `level` times."""
    # 12 icosahedron vertices grow as 10 * 4**level + 2
    return 10 * 4 ** level + 2
def calculate_nfaces(level):
    """Number of triangular faces in an icosphere subdivided `level` times."""
    # 20 icosahedron faces quadruple with each subdivision pass
    return 20 * 4 ** level
def cart2geo(x, y, z):
"""convert x y z cartesian coordinates to latitude longitude radius
xyz is a numpy array, a right handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
r = | np.sqrt(x**2 + y**2 + z**2) | numpy.sqrt |
#!/usr/bin/env python
import pytest
import os
import shutil
import json
import numpy as np
import cv2
import sys
import pandas as pd
from plotnine import ggplot
from plantcv import plantcv as pcv
import plantcv.learn
import plantcv.parallel
import plantcv.utils
# Import matplotlib and use a null Template to block plotting to screen
# This will let us test debug = "plot"
import matplotlib
import dask
from dask.distributed import Client
# Location of the bundled fixture data for the parallel subpackage tests
PARALLEL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parallel_data")
# Scratch directory shared by all tests; created in setup_function
TEST_TMPDIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".cache")
# Fixture subdirectories: flat images, images with dated filenames, snapshots
TEST_IMG_DIR = "images"
TEST_IMG_DIR2 = "images_w_date"
TEST_SNAPSHOT_DIR = "snapshots"
# Workflow script invoked by the job-builder / multiprocess tests
TEST_PIPELINE = os.path.join(PARALLEL_TEST_DATA, "plantcv-script.py")
# Position of each metadata term within an underscore-delimited filename
META_FIELDS = {"imgtype": 0, "camera": 1, "frame": 2, "zoom": 3, "lifter": 4, "gain": 5, "exposure": 6, "id": 7}
# Full metadata vocabulary: label, datatype string, and default value for
# every term the metadata parser understands.
VALID_META = {
    # Camera settings
    "camera": {
        "label": "camera identifier",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    "imgtype": {
        "label": "image type",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    "zoom": {
        "label": "camera zoom setting",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    "exposure": {
        "label": "camera exposure setting",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    "gain": {
        "label": "camera gain setting",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    "frame": {
        "label": "image series frame identifier",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    "lifter": {
        "label": "imaging platform height setting",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    # Date-Time
    "timestamp": {
        "label": "datetime of image",
        "datatype": "<class 'datetime.datetime'>",
        "value": None
    },
    # Sample attributes
    "id": {
        "label": "image identifier",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    "plantbarcode": {
        "label": "plant barcode identifier",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    "treatment": {
        "label": "treatment identifier",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    "cartag": {
        "label": "plant carrier identifier",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    # Experiment attributes
    "measurementlabel": {
        "label": "experiment identifier",
        "datatype": "<class 'str'>",
        "value": "none"
    },
    # Other
    "other": {
        "label": "other identifier",
        "datatype": "<class 'str'>",
        "value": "none"
    }
}
# Expected parser output when the VIS and NIR snapshot images are
# co-processed together (VIS entry carries a 'coimg' link to the NIR image).
METADATA_COPROCESS = {
    'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
        'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
        'camera': 'SV',
        'imgtype': 'VIS',
        'zoom': 'z1',
        'exposure': 'e82',
        'gain': 'g0',
        'frame': '0',
        'lifter': 'h1',
        'timestamp': '2014-10-22 17:49:35.187',
        'id': '117770',
        'plantbarcode': 'Ca031AA010564',
        'treatment': 'none',
        'cartag': '2143',
        'measurementlabel': 'C002ch_092214_biomass',
        'other': 'none',
        'coimg': 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'
    },
    'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
        'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
        'camera': 'SV',
        'imgtype': 'NIR',
        'zoom': 'z1',
        'exposure': 'e65',
        'gain': 'g0',
        'frame': '0',
        'lifter': 'h1',
        'timestamp': '2014-10-22 17:49:35.187',
        'id': '117779',
        'plantbarcode': 'Ca031AA010564',
        'treatment': 'none',
        'cartag': '2143',
        'measurementlabel': 'C002ch_092214_biomass',
        'other': 'none'
    }
}
# Expected parser output when filtering the snapshot data to the VIS image only
METADATA_VIS_ONLY = {
    'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
        'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
        'camera': 'SV',
        'imgtype': 'VIS',
        'zoom': 'z1',
        'exposure': 'e82',
        'gain': 'g0',
        'frame': '0',
        'lifter': 'h1',
        'timestamp': '2014-10-22 17:49:35.187',
        'id': '117770',
        'plantbarcode': 'Ca031AA010564',
        'treatment': 'none',
        'cartag': '2143',
        'measurementlabel': 'C002ch_092214_biomass',
        'other': 'none'
    }
}
# Expected parser output when filtering the snapshot data to the NIR image only
METADATA_NIR_ONLY = {
    'NIR_SV_0_z1_h1_g0_e65_117779.jpg': {
        'path': os.path.join(PARALLEL_TEST_DATA, 'snapshots', 'snapshot57383', 'NIR_SV_0_z1_h1_g0_e65_117779.jpg'),
        'camera': 'SV',
        'imgtype': 'NIR',
        'zoom': 'z1',
        'exposure': 'e65',
        'gain': 'g0',
        'frame': '0',
        'lifter': 'h1',
        'timestamp': '2014-10-22 17:49:35.187',
        'id': '117779',
        'plantbarcode': 'Ca031AA010564',
        'treatment': 'none',
        'cartag': '2143',
        'measurementlabel': 'C002ch_092214_biomass',
        'other': 'none'
    }
}
# Set the temp directory for dask
# (keeps dask worker scratch/spill files inside the test cache directory)
dask.config.set(temporary_directory=TEST_TMPDIR)
# ##########################
# Tests setup function
# ##########################
def setup_function():
    """Create the shared temporary directory used by all tests.

    Uses EAFP instead of an exists()-then-mkdir check so a concurrent
    test session cannot race between the check and the creation.
    """
    try:
        os.mkdir(TEST_TMPDIR)
    except FileExistsError:
        # Directory (or a file of the same name) already exists, same as
        # the original exists() short-circuit.
        pass
# ##############################
# Tests for the parallel subpackage
# ##############################
def test_plantcv_parallel_workflowconfig_save_config_file():
    """save_config should write a JSON template to the requested path."""
    # Dedicated temp directory for this test's output
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_save_config_file")
    os.mkdir(cache_dir)
    template_file = os.path.join(cache_dir, "config.json")
    # Save a fresh template and confirm the file landed on disk
    plantcv.parallel.WorkflowConfig().save_config(config_file=template_file)
    assert os.path.exists(template_file)
def test_plantcv_parallel_workflowconfig_import_config_file():
    """import_config should load settings from a JSON template file."""
    template = os.path.join(PARALLEL_TEST_DATA, "workflow_config_template.json")
    config = plantcv.parallel.WorkflowConfig()
    config.import_config(config_file=template)
    # The shipped template configures a LocalCluster scheduler
    assert config.cluster == "LocalCluster"
def test_plantcv_parallel_workflowconfig_validate_config():
    """A config with all required fields populated should pass validation."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_validate_config")
    os.mkdir(cache_dir)
    config = plantcv.parallel.WorkflowConfig()
    # Populate the required fields from a settings table
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, "images"),
        "json": os.path.join(cache_dir, "valid_config.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "img_outdir": cache_dir,
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    assert config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_startdate():
    """Validation should fail when start_date does not match the date format."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_startdate")
    os.mkdir(cache_dir)
    config = plantcv.parallel.WorkflowConfig()
    # Valid required fields plus a start_date that cannot be parsed with
    # the default timestamp format
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, "images"),
        "json": os.path.join(cache_dir, "valid_config.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "img_outdir": cache_dir,
        "start_date": "2020-05-10",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_enddate():
    """Validation should fail when end_date does not match timestampformat."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_enddate")
    os.mkdir(cache_dir)
    config = plantcv.parallel.WorkflowConfig()
    # Valid required fields plus an end_date that disagrees with the
    # configured timestamp format
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, "images"),
        "json": os.path.join(cache_dir, "valid_config.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "img_outdir": cache_dir,
        "end_date": "2020-05-10",
        "timestampformat": "%Y%m%d",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_metadata_terms():
    """Validation should fail when filename_metadata has an unknown term."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_metadata_terms")
    os.mkdir(cache_dir)
    config = plantcv.parallel.WorkflowConfig()
    # input_dir and json are deliberately left unset (both are required);
    # additionally add a metadata term that is not in the vocabulary
    config.filename_metadata += ["invalid"]
    assert not config.validate_config()
def test_plantcv_parallel_workflowconfig_invalid_filename_metadata():
    """Validation should fail when required fields are left unset."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_filename_metadata")
    os.mkdir(cache_dir)
    # input_dir, json, and filename_metadata are all required but left at
    # their unset defaults, so validation must fail
    assert not plantcv.parallel.WorkflowConfig().validate_config()
def test_plantcv_parallel_workflowconfig_invalid_cluster():
    """Validation should fail for an unsupported cluster type."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_workflowconfig_invalid_cluster")
    os.mkdir(cache_dir)
    config = plantcv.parallel.WorkflowConfig()
    # Required fields (input_dir, json) remain unset; also request an
    # unknown scheduler type
    config.cluster = "MyCluster"
    assert not config.validate_config()
def test_plantcv_parallel_metadata_parser_snapshots():
    """Parse snapshot metadata filtered to VIS side-view images."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots", "output.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "metadata_filters": {"imgtype": "VIS", "camera": "SV"},
        "start_date": "2014-10-21 00:00:00.0",
        "end_date": "2014-10-23 00:00:00.0",
        "timestampformat": '%Y-%m-%d %H:%M:%S.%f',
        "imgformat": "jpg",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    assert plantcv.parallel.metadata_parser(config=config) == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_snapshots_coimg():
    """An unmatched coprocess type should yield VIS metadata with no coimg."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshots_coimg", "output.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "metadata_filters": {"imgtype": "VIS"},
        "start_date": "2014-10-21 00:00:00.0",
        "end_date": "2014-10-23 00:00:00.0",
        "timestampformat": '%Y-%m-%d %H:%M:%S.%f',
        "imgformat": "jpg",
        # "FAKE" matches no image type, so no co-image is linked
        "coprocess": "FAKE",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    assert plantcv.parallel.metadata_parser(config=config) == METADATA_VIS_ONLY
def test_plantcv_parallel_metadata_parser_images():
    """Parse flat-directory images; date range is ignored without timestamps."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "metadata_filters": {"imgtype": "VIS"},
        # no date in the filenames, so the range and format are ignored
        "start_date": "2014",
        "end_date": "2014",
        "timestampformat": '%Y',
        "imgformat": "jpg",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    expected = {
        'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
            'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
            'camera': 'SV', 'imgtype': 'VIS', 'zoom': 'z1', 'exposure': 'e82',
            'gain': 'g0', 'frame': '0', 'lifter': 'h1', 'timestamp': None,
            'id': '117770', 'plantbarcode': 'none', 'treatment': 'none',
            'cartag': 'none', 'measurementlabel': 'none', 'other': 'none'}
    }
    # the result should be identical with and without subdirectory traversal
    assert plantcv.parallel.metadata_parser(config=config) == expected
    config.include_all_subdirs = False
    assert plantcv.parallel.metadata_parser(config=config) == expected
def test_plantcv_parallel_metadata_parser_regex():
    """Parse metadata using a regex delimiter instead of a character split."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images", "output.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "metadata_filters": {"imgtype": "VIS"},
        "start_date": "2014-10-21 00:00:00.0",
        "end_date": "2014-10-23 00:00:00.0",
        "timestampformat": '%Y-%m-%d %H:%M:%S.%f',
        "imgformat": "jpg",
        # one capture group per filename_metadata term
        "delimiter": r'(VIS)_(SV)_(\d+)_(z1)_(h1)_(g0)_(e82)_(\d+)',
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    expected = {
        'VIS_SV_0_z1_h1_g0_e82_117770.jpg': {
            'path': os.path.join(PARALLEL_TEST_DATA, 'images', 'VIS_SV_0_z1_h1_g0_e82_117770.jpg'),
            'camera': 'SV', 'imgtype': 'VIS', 'zoom': 'z1', 'exposure': 'e82',
            'gain': 'g0', 'frame': '0', 'lifter': 'h1', 'timestamp': None,
            'id': '117770', 'plantbarcode': 'none', 'treatment': 'none',
            'cartag': 'none', 'measurementlabel': 'none', 'other': 'none'}
    }
    assert plantcv.parallel.metadata_parser(config=config) == expected
def test_plantcv_parallel_metadata_parser_images_outside_daterange():
    """Images timestamped outside the date range should be excluded entirely."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_IMG_DIR2),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_outside_daterange",
                             "output.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "timestamp"],
        "workflow": TEST_PIPELINE,
        "metadata_filters": {"imgtype": "NIR"},
        # a 1970 range cannot contain the 2014 fixture images
        "start_date": "1970-01-01 00_00_00",
        "end_date": "1970-01-01 00_00_00",
        "timestampformat": "%Y-%m-%d %H_%M_%S",
        "imgformat": "jpg",
        "delimiter": r"(NIR)_(SV)_(\d)_(z1)_(h1)_(g0)_(e65)_(\d{4}-\d{2}-\d{2} \d{2}_\d{2}_\d{2})",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    assert plantcv.parallel.metadata_parser(config=config) == {}
def test_plantcv_parallel_metadata_parser_no_default_dates():
    """With no date bounds set, all matching images should be returned."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_no_default_dates", "output.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "metadata_filters": {"imgtype": "VIS", "camera": "SV", "id": "117770"},
        # explicitly disable date filtering
        "start_date": None,
        "end_date": None,
        "timestampformat": '%Y-%m-%d %H:%M:%S.%f',
        "imgformat": "jpg",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    assert plantcv.parallel.metadata_parser(config=config) == METADATA_VIS_ONLY
def test_plantcv_parallel_check_date_range_wrongdateformat():
    """check_date_range should exit when the timestamp cannot be parsed."""
    # args: start_date, end_date, img_time, date_format
    with pytest.raises(SystemExit, match=r'does not match format'):
        _ = plantcv.parallel.check_date_range(10, 10, '2010-10-10', '%Y%m%d')
def test_plantcv_parallel_metadata_parser_snapshot_outside_daterange():
    """Snapshots outside the requested date range should be excluded."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_snapshot_outside_daterange",
                             "output.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "metadata_filters": {"imgtype": "VIS"},
        # a 1970 window excludes the 2014 snapshot fixture
        "start_date": "1970-01-01 00:00:00.0",
        "end_date": "1970-01-01 00:00:00.0",
        "timestampformat": '%Y-%m-%d %H:%M:%S.%f',
        "imgformat": "jpg",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    assert plantcv.parallel.metadata_parser(config=config) == {}
def test_plantcv_parallel_metadata_parser_fail_images():
    """A filter on a non-filename term leaves only the coprocess NIR image."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_fail_images", "output.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        # cartag never equals "VIS", so the VIS-side filter matches nothing
        "metadata_filters": {"cartag": "VIS"},
        "start_date": "1970-01-01 00:00:00.0",
        "end_date": "1970-01-01 00:00:00.0",
        "timestampformat": '%Y-%m-%d %H:%M:%S.%f',
        "imgformat": "jpg",
        "coprocess": "NIR",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    assert plantcv.parallel.metadata_parser(config=config) == METADATA_NIR_ONLY
def test_plantcv_parallel_metadata_parser_images_with_frame():
    """With a frame term present, VIS/NIR coprocessing pairs both images."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_with_frame", "output.json"),
        "filename_metadata": ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "metadata_filters": {"imgtype": "VIS"},
        "start_date": "2014-10-21 00:00:00.0",
        "end_date": "2014-10-23 00:00:00.0",
        "timestampformat": '%Y-%m-%d %H:%M:%S.%f',
        "imgformat": "jpg",
        "coprocess": "NIR",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    meta = plantcv.parallel.metadata_parser(config=config)
    # The inline expectation in the original test is identical to the
    # shared METADATA_COPROCESS fixture, so compare against it directly.
    assert meta == METADATA_COPROCESS
def test_plantcv_parallel_metadata_parser_images_no_frame():
    """Without a frame term, parsed metadata falls back to frame='none'."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame",
                             "output.json"),
        # "X" is a placeholder occupying the frame position in the filename
        "filename_metadata": ["imgtype", "camera", "X", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "metadata_filters": {"imgtype": "VIS"},
        "start_date": "2014-10-21 00:00:00.0",
        "end_date": "2014-10-23 00:00:00.0",
        "timestampformat": '%Y-%m-%d %H:%M:%S.%f',
        "imgformat": "jpg",
        "coprocess": "NIR",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    # Expected output equals the coprocess fixture except frame is 'none'
    expected = {fname: dict(info, frame='none') for fname, info in METADATA_COPROCESS.items()}
    assert plantcv.parallel.metadata_parser(config=config) == expected
def test_plantcv_parallel_metadata_parser_images_no_camera():
    """Without a camera term, parsed metadata falls back to camera='none'."""
    config = plantcv.parallel.WorkflowConfig()
    settings = {
        "input_dir": os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR),
        "json": os.path.join(TEST_TMPDIR, "test_plantcv_parallel_metadata_parser_images_no_frame", "output.json"),
        # "X" is a placeholder occupying the camera position in the filename
        "filename_metadata": ["imgtype", "X", "frame", "zoom", "lifter", "gain", "exposure", "id"],
        "workflow": TEST_PIPELINE,
        "metadata_filters": {"imgtype": "VIS"},
        "start_date": "2014-10-21 00:00:00.0",
        "end_date": "2014-10-23 00:00:00.0",
        "timestampformat": '%Y-%m-%d %H:%M:%S.%f',
        "imgformat": "jpg",
        "coprocess": "NIR",
    }
    for attr, value in settings.items():
        setattr(config, attr, value)
    # Expected output equals the coprocess fixture except camera is 'none'
    expected = {fname: dict(info, camera='none') for fname, info in METADATA_COPROCESS.items()}
    assert plantcv.parallel.metadata_parser(config=config) == expected
def test_plantcv_parallel_job_builder_single_image():
    """job_builder should emit one correctly-formed command for a VIS image."""
    # Create cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_single_image")
    os.mkdir(cache_dir)
    # Create config instance
    config = plantcv.parallel.WorkflowConfig()
    config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
    config.json = os.path.join(cache_dir, "output.json")
    config.tmp_dir = cache_dir
    config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
    config.workflow = TEST_PIPELINE
    config.img_outdir = cache_dir
    config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
    config.start_date = "2014-10-21 00:00:00.0"
    config.end_date = "2014-10-23 00:00:00.0"
    config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
    config.imgformat = "jpg"
    config.other_args = ["--other", "on"]
    config.writeimg = True

    jobs = plantcv.parallel.job_builder(meta=METADATA_VIS_ONLY, config=config)

    image_name = list(METADATA_VIS_ONLY.keys())[0]
    result_file = os.path.join(cache_dir, image_name + '.txt')

    expected = ['python', TEST_PIPELINE, '--image', METADATA_VIS_ONLY[image_name]['path'], '--outdir',
                cache_dir, '--result', result_file, '--writeimg', '--other', 'on']

    # BUG FIX: the original used all([i == j] for i, j in zip(...)), which is
    # always True because each element is a non-empty list. Compare the
    # sequences directly instead (this also covers the length check).
    assert jobs[0] == expected
def test_plantcv_parallel_job_builder_coprocess():
    """job_builder should emit a command carrying both result and coresult."""
    # Create cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_job_builder_coprocess")
    os.mkdir(cache_dir)
    # Create config instance
    config = plantcv.parallel.WorkflowConfig()
    config.input_dir = os.path.join(PARALLEL_TEST_DATA, TEST_SNAPSHOT_DIR)
    config.json = os.path.join(cache_dir, "output.json")
    config.tmp_dir = cache_dir
    config.filename_metadata = ["imgtype", "camera", "frame", "zoom", "lifter", "gain", "exposure", "id"]
    config.workflow = TEST_PIPELINE
    config.img_outdir = cache_dir
    config.metadata_filters = {"imgtype": "VIS", "camera": "SV"}
    config.start_date = "2014-10-21 00:00:00.0"
    config.end_date = "2014-10-23 00:00:00.0"
    config.timestampformat = '%Y-%m-%d %H:%M:%S.%f'
    config.imgformat = "jpg"
    config.other_args = ["--other", "on"]
    config.writeimg = True
    config.coprocess = "NIR"

    jobs = plantcv.parallel.job_builder(meta=METADATA_COPROCESS, config=config)

    img_names = list(METADATA_COPROCESS.keys())
    vis_name = img_names[0]
    vis_path = METADATA_COPROCESS[vis_name]['path']
    result_file = os.path.join(cache_dir, vis_name + '.txt')
    nir_name = img_names[1]
    coresult_file = os.path.join(cache_dir, nir_name + '.txt')

    expected = ['python', TEST_PIPELINE, '--image', vis_path, '--outdir', cache_dir, '--result', result_file,
                '--coresult', coresult_file, '--writeimg', '--other', 'on']

    # BUG FIX: the original used all([i == j] for i, j in zip(...)), which is
    # always True because each element is a non-empty list. Compare the
    # sequences directly instead (this also covers the length check).
    assert jobs[0] == expected
def test_plantcv_parallel_multiprocess_create_dask_cluster_local():
    """A LocalCluster dask client should come up in the running state."""
    dask_client = plantcv.parallel.create_dask_cluster(cluster="LocalCluster", cluster_config={})
    cluster_state = dask_client.status
    dask_client.shutdown()
    assert cluster_state == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster():
    """An HTCondorCluster dask client should come up in the running state."""
    cluster_opts = {"cores": 1, "memory": "1GB", "disk": "1GB"}
    dask_client = plantcv.parallel.create_dask_cluster(cluster="HTCondorCluster", cluster_config=cluster_opts)
    cluster_state = dask_client.status
    dask_client.shutdown()
    assert cluster_state == "running"
def test_plantcv_parallel_multiprocess_create_dask_cluster_invalid_cluster():
    """An unsupported cluster type must raise ValueError."""
    with pytest.raises(ValueError):
        # "Skynet" is not one of the supported dask cluster types
        _ = plantcv.parallel.create_dask_cluster(cluster="Skynet", cluster_config={})
def test_plantcv_parallel_convert_datetime_to_unixtime():
    """The Unix epoch itself must convert to timestamp zero."""
    epoch = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m-%d")
    assert epoch == 0
def test_plantcv_parallel_convert_datetime_to_unixtime_bad_strptime():
    """A timestamp that does not match the supplied format exits with an error."""
    with pytest.raises(SystemExit):
        _ = plantcv.parallel.convert_datetime_to_unixtime(timestamp_str="1970-01-01", date_format="%Y-%m")
def test_plantcv_parallel_multiprocess():
    """multiprocess should run a job through a dask client and write the result file."""
    vis_name = list(METADATA_VIS_ONLY.keys())[0]
    vis_path = os.path.join(METADATA_VIS_ONLY[vis_name]['path'], vis_name)
    out_file = os.path.join(TEST_TMPDIR, vis_name + '.txt')
    job_list = [['python', TEST_PIPELINE, '--image', vis_path, '--outdir', TEST_TMPDIR, '--result', out_file,
                 '--writeimg', '--other', 'on']]
    # Single-worker dask LocalCluster client
    dask_client = Client(n_workers=1)
    plantcv.parallel.multiprocess(job_list, client=dask_client)
    assert os.path.exists(out_file)
def test_plantcv_parallel_process_results():
    """Running process_results twice over the same job dir must match the reference JSON."""
    # Create a test tmp directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results")
    os.mkdir(cache_dir)
    plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
                                     json_file=os.path.join(cache_dir, 'appended_results.json'))
    plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
                                     json_file=os.path.join(cache_dir, 'appended_results.json'))
    # Assert that the output JSON file matches the expected output JSON file.
    # IMPROVEMENT: context managers replace manual open()/close() pairs so the
    # file handles are released even if json.load raises.
    with open(os.path.join(cache_dir, "appended_results.json"), "r") as result_file:
        results = json.load(result_file)
    with open(os.path.join(PARALLEL_TEST_DATA, "appended_results.json")) as expected_file:
        expected = json.load(expected_file)
    assert results == expected
def test_plantcv_parallel_process_results_new_output():
    """process_results writing a brand-new output file must match the reference JSON."""
    # Create a test tmp directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_new_output")
    os.mkdir(cache_dir)
    plantcv.parallel.process_results(job_dir=os.path.join(PARALLEL_TEST_DATA, "results"),
                                     json_file=os.path.join(cache_dir, 'new_result.json'))
    # Assert output matches expected values.
    # IMPROVEMENT: context managers replace manual open()/close() pairs so the
    # file handles are released even if json.load raises.
    with open(os.path.join(cache_dir, "new_result.json"), "r") as result_file:
        results = json.load(result_file)
    with open(os.path.join(PARALLEL_TEST_DATA, "new_result.json")) as expected_file:
        expected = json.load(expected_file)
    assert results == expected
def test_plantcv_parallel_process_results_valid_json():
    """A syntactically valid JSON file missing the expected keys must be rejected."""
    job_dir = os.path.join(PARALLEL_TEST_DATA, "results")
    wrong_schema_json = os.path.join(PARALLEL_TEST_DATA, "valid.json")
    with pytest.raises(RuntimeError):
        plantcv.parallel.process_results(job_dir=job_dir, json_file=wrong_schema_json)
def test_plantcv_parallel_process_results_invalid_json():
    """process_results must reject a results file that is not valid JSON."""
    # Create a test tmp directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_parallel_process_results_invalid_json")
    os.mkdir(cache_dir)
    # Copy the broken test data into the tmp directory
    bad_dir = os.path.join(cache_dir, "bad_results")
    shutil.copytree(os.path.join(PARALLEL_TEST_DATA, "bad_results"), bad_dir)
    with pytest.raises(RuntimeError):
        plantcv.parallel.process_results(job_dir=bad_dir,
                                         json_file=os.path.join(bad_dir, "invalid.txt"))
# ####################################################################################################################
# ########################################### PLANTCV MAIN PACKAGE ###################################################
# Use a non-interactive matplotlib backend so plotting tests never need a display.
matplotlib.use('Template')
# Directories holding the static test fixtures.
TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
HYPERSPECTRAL_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "hyperspectral_data")
# Hyperspectral (ENVI) fixtures: raw data cubes, headers, and masks.
HYPERSPECTRAL_DATA = "darkReference"
HYPERSPECTRAL_WHITE = "darkReference_whiteReference"
HYPERSPECTRAL_DARK = "darkReference_darkReference"
HYPERSPECTRAL_HDR = "darkReference.hdr"
HYPERSPECTRAL_MASK = "darkReference_mask.png"
HYPERSPECTRAL_DATA_NO_DEFAULT = "darkReference2"
HYPERSPECTRAL_HDR_NO_DEFAULT = "darkReference2.hdr"
HYPERSPECTRAL_DATA_APPROX_PSEUDO = "darkReference3"
HYPERSPECTRAL_HDR_APPROX_PSEUDO = "darkReference3.hdr"
# Minimal in-memory ENVI header dict with a short wavelength list.
HYPERSPECTRAL_HDR_SMALL_RANGE = {'description': '{[HEADWALL Hyperspec III]}', 'samples': '800', 'lines': '1',
                                 'bands': '978', 'header offset': '0', 'file type': 'ENVI Standard',
                                 'interleave': 'bil', 'sensor type': 'Unknown', 'byte order': '0',
                                 'default bands': '159,253,520', 'wavelength units': 'nm',
                                 'wavelength': ['379.027', '379.663', '380.3', '380.936', '381.573', '382.209']}
# Photosynthesis (PSII) fixtures.
FLUOR_TEST_DATA = os.path.join(os.path.dirname(os.path.abspath(__file__)), "photosynthesis_data")
FLUOR_IMG = "PSII_PSD_supopt_temp_btx623_22_rep1.DAT"
# Expected dimensions of the standard color/gray/binary fixture images.
TEST_COLOR_DIM = (2056, 2454, 3)
TEST_GRAY_DIM = (2056, 2454)
TEST_BINARY_DIM = TEST_GRAY_DIM
# Filenames of fixtures inside TEST_DATA (loaded with cv2.imread / np.load).
TEST_INPUT_COLOR = "input_color_img.jpg"
TEST_INPUT_GRAY = "input_gray_img.jpg"
TEST_INPUT_GRAY_SMALL = "input_gray_img_small.jpg"
TEST_INPUT_BINARY = "input_binary_img.png"
# Image from http://www.libpng.org/pub/png/png-OwlAlpha.html
# This image may be used, edited and reproduced freely.
TEST_INPUT_RGBA = "input_rgba.png"
TEST_INPUT_BAYER = "bayer_img.png"
TEST_INPUT_ROI_CONTOUR = "input_roi_contour.npz"
TEST_INPUT_ROI_HIERARCHY = "input_roi_hierarchy.npz"
TEST_INPUT_CONTOURS = "input_contours.npz"
TEST_INPUT_OBJECT_CONTOURS = "input_object_contours.npz"
TEST_INPUT_OBJECT_HIERARCHY = "input_object_hierarchy.npz"
TEST_VIS = "VIS_SV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR = "NIR_SV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_VIS_TV = "VIS_TV_0_z300_h1_g0_e85_v500_93054.png"
TEST_NIR_TV = "NIR_TV_0_z300_h1_g0_e15000_v500_93059.png"
TEST_INPUT_MASK = "input_mask_binary.png"
TEST_INPUT_MASK_OOB = "mask_outbounds.png"
TEST_INPUT_MASK_RESIZE = "input_mask_resize.png"
TEST_INPUT_NIR_MASK = "input_nir.png"
TEST_INPUT_FDARK = "FLUO_TV_dark.png"
TEST_INPUT_FDARK_LARGE = "FLUO_TV_DARK_large"
TEST_INPUT_FMIN = "FLUO_TV_min.png"
TEST_INPUT_FMAX = "FLUO_TV_max.png"
TEST_INPUT_FMASK = "FLUO_TV_MASK.png"
TEST_INPUT_GREENMAG = "input_green-magenta.jpg"
TEST_INPUT_MULTI = "multi_ori_image.jpg"
TEST_INPUT_MULTI_MASK = "multi_ori_mask.jpg"
TEST_INPUT_MULTI_OBJECT = "roi_objects.npz"
TEST_INPUT_MULTI_CONTOUR = "multi_contours.npz"
# NOTE(review): "ClUSTER" (lowercase L) typo is kept intentionally — the name
# is referenced elsewhere in this file; renaming would break those tests.
TEST_INPUT_ClUSTER_CONTOUR = "clusters_i.npz"
TEST_INPUT_MULTI_HIERARCHY = "multi_hierarchy.npz"
TEST_INPUT_VISUALIZE_CONTOUR = "roi_objects_visualize.npz"
TEST_INPUT_VISUALIZE_HIERARCHY = "roi_obj_hierarchy_visualize.npz"
TEST_INPUT_VISUALIZE_CLUSTERS = "clusters_i_visualize.npz"
TEST_INPUT_VISUALIZE_BACKGROUND = "visualize_background_img.png"
TEST_INPUT_GENOTXT = "cluster_names.txt"
TEST_INPUT_GENOTXT_TOO_MANY = "cluster_names_too_many.txt"
TEST_INPUT_CROPPED = 'cropped_img.jpg'
TEST_INPUT_CROPPED_MASK = 'cropped-mask.png'
TEST_INPUT_MARKER = 'seed-image.jpg'
TEST_INPUT_SKELETON = 'input_skeleton.png'
TEST_INPUT_SKELETON_PRUNED = 'input_pruned_skeleton.png'
# Naive Bayes classifier fixtures.
TEST_FOREGROUND = "TEST_FOREGROUND.jpg"
TEST_BACKGROUND = "TEST_BACKGROUND.jpg"
TEST_PDFS = "naive_bayes_pdfs.txt"
TEST_PDFS_BAD = "naive_bayes_pdfs_bad.txt"
# Small Setaria fixtures used by the acute()/acute_vertex() tests.
TEST_VIS_SMALL = "setaria_small_vis.png"
TEST_MASK_SMALL = "setaria_small_mask.png"
TEST_VIS_COMP_CONTOUR = "setaria_composed_contours.npz"
# Reference homology points: only the SHAPE of this array is compared against
# acute_vertex() output in the tests below.
TEST_ACUTE_RESULT = np.asarray([[[119, 285]], [[151, 280]], [[168, 267]], [[168, 262]], [[171, 261]], [[224, 269]],
                                [[246, 271]], [[260, 277]], [[141, 248]], [[183, 194]], [[188, 237]], [[173, 240]],
                                [[186, 260]], [[147, 244]], [[163, 246]], [[173, 268]], [[170, 272]], [[151, 320]],
                                [[195, 289]], [[228, 272]], [[210, 272]], [[209, 247]], [[210, 232]]])
TEST_VIS_SMALL_PLANT = "setaria_small_plant_vis.png"
TEST_MASK_SMALL_PLANT = "setaria_small_plant_mask.png"
TEST_VIS_COMP_CONTOUR_SMALL_PLANT = "setaria_small_plant_composed_contours.npz"
# Color-correction (transform) fixtures.
TEST_SAMPLED_RGB_POINTS = "sampled_rgb_points.txt"
TEST_TARGET_IMG = "target_img.png"
TEST_TARGET_IMG_WITH_HEXAGON = "target_img_w_hexagon.png"
TEST_TARGET_IMG_TRIANGLE = "target_img copy.png"
TEST_SOURCE1_IMG = "source1_img.png"
TEST_SOURCE2_IMG = "source2_img.png"
TEST_TARGET_MASK = "mask_img.png"
TEST_TARGET_IMG_COLOR_CARD = "color_card_target.png"
TEST_SOURCE2_MASK = "mask2_img.png"
TEST_TARGET_MATRIX = "target_matrix.npz"
TEST_SOURCE1_MATRIX = "source1_matrix.npz"
TEST_SOURCE2_MATRIX = "source2_matrix.npz"
TEST_MATRIX_B1 = "matrix_b1.npz"
TEST_MATRIX_B2 = "matrix_b2.npz"
TEST_TRANSFORM1 = "transformation_matrix1.npz"
TEST_MATRIX_M1 = "matrix_m1.npz"
TEST_MATRIX_M2 = "matrix_m2.npz"
TEST_S1_CORRECTED = "source_corrected.png"
# Morphology and thermal fixtures.
TEST_SKELETON_OBJECTS = "skeleton_objects.npz"
TEST_SKELETON_HIERARCHIES = "skeleton_hierarchies.npz"
TEST_THERMAL_ARRAY = "thermal_img.npz"
TEST_THERMAL_IMG_MASK = "thermal_img_mask.png"
TEST_INPUT_THERMAL_CSV = "FLIR2600.csv"
PIXEL_VALUES = "pixel_inspector_rgb_values.txt"
# ##########################
# Tests for the main package
# ##########################
@pytest.mark.parametrize("debug", ["print", "plot"])
def test_plantcv_debug(debug, tmpdir):
    """_debug should handle both the print and plot modes without error."""
    from plantcv.plantcv._debug import _debug
    # Fresh tmp directory per parametrized run
    img_outdir = tmpdir.mkdir("sub")
    pcv.params.debug = debug
    test_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    _debug(visual=test_img, filename=os.path.join(img_outdir, TEST_INPUT_COLOR))
    assert True
@pytest.mark.parametrize("datatype,value", [[list, []], [int, 2], [float, 2.2], [bool, True], [str, "2"], [dict, {}],
                                            [tuple, ()], [None, None]])
def test_plantcv_outputs_add_observation(datatype, value):
    """add_observation should store every supported datatype verbatim."""
    out = pcv.Outputs()
    out.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
                        datatype=datatype, value=value, label=[])
    assert out.observations["default"]["test"]["value"] == value
def test_plantcv_outputs_add_observation_invalid_type():
    """Unsupported value types (e.g. numpy arrays) must be rejected."""
    out = pcv.Outputs()
    with pytest.raises(RuntimeError):
        out.add_observation(sample='default', variable='test', trait='test variable', method='type', scale='none',
                            datatype=list, value=np.array([2]), label=[])
def test_plantcv_outputs_save_results_json_newfile(tmpdir):
    """save_results should create a fresh JSON results file with the observation."""
    out_dir = tmpdir.mkdir("sub")
    json_path = os.path.join(out_dir, "results.json")
    out = pcv.Outputs()
    out.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
                        datatype=str, value="test", label="none")
    out.save_results(filename=json_path, outformat="json")
    with open(json_path, "r") as fp:
        saved = json.load(fp)
    assert saved["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_json_existing_file(tmpdir):
    """save_results must merge new observations into a pre-existing results file."""
    out_dir = tmpdir.mkdir("sub")
    json_path = os.path.join(out_dir, "data_results.txt")
    # Seed the output path with an existing results file
    shutil.copyfile(os.path.join(TEST_DATA, "data_results.txt"), json_path)
    out = pcv.Outputs()
    out.add_observation(sample='default', variable='test', trait='test variable', method='test', scale='none',
                        datatype=str, value="test", label="none")
    out.save_results(filename=json_path, outformat="json")
    with open(json_path, "r") as fp:
        saved = json.load(fp)
    assert saved["observations"]["default"]["test"]["value"] == "test"
def test_plantcv_outputs_save_results_csv(tmpdir):
    """CSV export must match the reference data_results.csv byte-for-byte."""
    out_dir = tmpdir.mkdir("sub")
    csv_path = os.path.join(out_dir, "results.csv")
    reference_path = os.path.join(TEST_DATA, "data_results.csv")
    out = pcv.Outputs()
    # One observation per supported datatype
    out.add_observation(sample='default', variable='string', trait='string variable', method='string', scale='none',
                        datatype=str, value="string", label="none")
    out.add_observation(sample='default', variable='boolean', trait='boolean variable', method='boolean',
                        scale='none', datatype=bool, value=True, label="none")
    out.add_observation(sample='default', variable='list', trait='list variable', method='list',
                        scale='none', datatype=list, value=[1, 2, 3], label=[1, 2, 3])
    out.add_observation(sample='default', variable='tuple', trait='tuple variable', method='tuple',
                        scale='none', datatype=tuple, value=(1, 2), label=(1, 2))
    out.add_observation(sample='default', variable='tuple_list', trait='list of tuples variable',
                        method='tuple_list', scale='none', datatype=list, value=[(1, 2), (3, 4)], label=[1, 2])
    out.save_results(filename=csv_path, outformat="csv")
    with open(csv_path, "r") as fp:
        produced = fp.read()
    with open(reference_path, "r") as fp:
        wanted = fp.read()
    assert produced == wanted
def test_plantcv_transform_warp_smaller():
    """Warping a smaller mask up to the reference size preserves the nonzero count."""
    ref_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    small = cv2.resize(bin_img, (200, 300))  # not sure why INTER_NEAREST doesn't preserve values
    small[small > 0] = 255
    mrow, mcol = small.shape
    vrow, vcol, _ = ref_img.shape
    src_pts = [(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)]
    ref_pts = [(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)]
    pcv.params.debug = None
    mask_warped = pcv.transform.warp(small, ref_img[:, :, 2], pts=src_pts, refpts=ref_pts)
    pcv.params.debug = 'plot'
    mask_warped_plot = pcv.transform.warp(small, ref_img[:, :, 2], pts=src_pts, refpts=ref_pts)
    assert np.count_nonzero(mask_warped) == 93142
    assert np.count_nonzero(mask_warped_plot) == 93142
def test_plantcv_transform_warp_larger():
    """Warping a larger grayscale image to the reference size yields a known sum."""
    ref_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    large = cv2.resize(gray, (5000, 7000))
    mrow, mcol = large.shape
    vrow, vcol, _ = ref_img.shape
    pcv.params.debug = 'print'
    warped = pcv.transform.warp(large, ref_img,
                                pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                                refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    assert np.sum(warped) == 83103814
def test_plantcv_transform_warp_rgbimgerror():
    """A color (3-channel) image as the warp source must raise RuntimeError."""
    ref_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    large = cv2.resize(gray, (5000, 7000))
    mrow, mcol = large.shape
    vrow, vcol, _ = ref_img.shape
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(ref_img, ref_img,
                               pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
def test_plantcv_transform_warp_4ptserror():
    """Both pts and refpts must contain exactly four corner points."""
    ref_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    mrow, mcol, _ = ref_img.shape
    vrow, vcol, _ = ref_img.shape
    # Three source points, three reference points
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(ref_img[:, :, 0], ref_img,
                               pts=[(0, 0), (mcol - 1, 0), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (0, vrow - 1)])
    # Three source points, four reference points
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(ref_img[:, :, 1], ref_img,
                               pts=[(0, 0), (mcol - 1, 0), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1)])
    # Four source points, five reference points
    with pytest.raises(RuntimeError):
        _ = pcv.transform.warp(ref_img[:, :, 2], ref_img,
                               pts=[(0, 0), (mcol - 1, 0), (mcol - 1, mrow - 1), (0, mrow - 1)],
                               refpts=[(0, 0), (vcol - 1, 0), (vcol - 1, vrow - 1), (0, vrow - 1), (0, vrow - 1)])
def test_plantcv_acute():
    """pcv.acute should return homology points with the expected shape."""
    # Read in test data
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
    obj_contour = contours_npz['arr_0']
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
    _ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
    _ = pcv.acute(obj=np.array(([[213, 190]], [[83, 61]], [[149, 246]])), win=84, thresh=192, mask=mask)
    _ = pcv.acute(obj=np.array(([[3, 29]], [[31, 102]], [[161, 63]])), win=148, thresh=56, mask=mask)
    _ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
    # Test with debug = None
    pcv.params.debug = None
    _ = pcv.acute(obj=np.array(([[103, 154]], [[27, 227]], [[152, 83]])), win=35, thresh=0, mask=mask)
    _ = pcv.acute(obj=obj_contour, win=0, thresh=15, mask=mask)
    homology_pts = pcv.acute(obj=obj_contour, win=5, thresh=15, mask=mask)
    # BUG FIX: the old check `all([i == j] for ...)` was always True because
    # each generated element is a non-empty list. Compare element-wise.
    assert all(i == j for i, j in zip(np.shape(homology_pts), (29, 1, 2)))
def test_plantcv_acute_vertex():
    """pcv.acute_vertex should return points shaped like the reference result."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
    obj_contour = contours_npz['arr_0']
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img, label="prefix")
    _ = pcv.acute_vertex(obj=[], win=5, thresh=15, sep=5, img=img)
    _ = pcv.acute_vertex(obj=[], win=.01, thresh=.01, sep=1, img=img)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    # Test with debug = None
    pcv.params.debug = None
    acute = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    # BUG FIX: the old check `all([i == j] for ...)` was always True because
    # each generated element is a non-empty list. Compare shapes element-wise.
    assert all(i == j for i, j in zip(np.shape(acute), np.shape(TEST_ACUTE_RESULT)))
    pcv.outputs.clear()
def test_plantcv_acute_vertex_bad_obj():
    """An empty contour should return the (0, ("NA", "NA")) sentinel result."""
    img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
    obj_contour = np.array([])
    pcv.params.debug = None
    result = pcv.acute_vertex(obj=obj_contour, win=5, thresh=15, sep=5, img=img)
    # BUG FIX: the old check `all([i == j] for ...)` was always True because
    # each generated element is a non-empty list. Compare element-wise.
    assert all(i == j for i, j in zip(result, [0, ("NA", "NA")]))
    pcv.outputs.clear()
def test_plantcv_analyze_bound_horizontal():
    """Exercise analyze_bound_horizontal across debug modes and line positions."""
    # Start from a clean outputs store
    pcv.outputs.clear()
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    img_above_bound_only = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT))
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    contour = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")['arr_0']
    # debug = "print"
    pcv.params.debug = "print"
    _ = pcv.analyze_bound_horizontal(img=rgb, obj=contour, mask=bin_mask, line_position=300, label="prefix")
    pcv.outputs.clear()
    _ = pcv.analyze_bound_horizontal(img=rgb, obj=contour, mask=bin_mask, line_position=100)
    _ = pcv.analyze_bound_horizontal(img=img_above_bound_only, obj=contour, mask=bin_mask, line_position=1756)
    # debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.analyze_bound_horizontal(img=rgb, obj=contour, mask=bin_mask, line_position=1756)
    # debug = None
    pcv.params.debug = None
    _ = pcv.analyze_bound_horizontal(img=rgb, obj=contour, mask=bin_mask, line_position=1756)
    assert len(pcv.outputs.observations["default"]) == 7
def test_plantcv_analyze_bound_horizontal_grayscale_image():
    """A grayscale reference image should still yield a 3-channel debug image."""
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    contour = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")['arr_0']
    # Grayscale reference image with debug="plot"
    pcv.params.debug = "plot"
    boundary_img1 = pcv.analyze_bound_horizontal(img=gray, obj=contour, mask=bin_mask, line_position=1756)
    assert len(np.shape(boundary_img1)) == 3
def test_plantcv_analyze_bound_horizontal_neg_y():
    """Line positions outside the image (negative / zero / max) are handled."""
    # Clear previous outputs
    pcv.outputs.clear()
    # Test cache directory.
    # BUG FIX: this directory name is shared with test_plantcv_analyze_bound_horizontal,
    # so a plain os.mkdir raises FileExistsError when that test runs first.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_horizontal")
    os.makedirs(cache_dir, exist_ok=True)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
    object_contours = contours_npz['arr_0']
    # Line positions that trigger the negative-y branch
    pcv.params.debug = "plot"
    _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=-1000)
    _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=0)
    _ = pcv.analyze_bound_horizontal(img=img, obj=object_contours, mask=mask, line_position=2056)
    assert pcv.outputs.observations['default']['height_above_reference']['value'] == 713
def test_plantcv_analyze_bound_vertical():
    """Exercise analyze_bound_vertical in every debug mode."""
    # Start from a clean outputs store
    pcv.outputs.clear()
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    contour = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")['arr_0']
    # debug = "print"
    pcv.params.debug = "print"
    _ = pcv.analyze_bound_vertical(img=rgb, obj=contour, mask=bin_mask, line_position=1000, label="prefix")
    # debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.analyze_bound_vertical(img=rgb, obj=contour, mask=bin_mask, line_position=1000)
    # debug = None
    pcv.params.debug = None
    _ = pcv.analyze_bound_vertical(img=rgb, obj=contour, mask=bin_mask, line_position=1000)
    assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
def test_plantcv_analyze_bound_vertical_grayscale_image():
    """A grayscale reference image works with analyze_bound_vertical."""
    # Test cache directory.
    # BUG FIX: this directory name is shared with the other analyze_bound_vertical
    # tests, so a plain os.mkdir raises FileExistsError when one of them runs first.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
    os.makedirs(cache_dir, exist_ok=True)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
    object_contours = contours_npz['arr_0']
    # Test with a grayscale reference image and debug="plot"
    pcv.params.debug = "plot"
    _ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1000)
    assert pcv.outputs.observations['default']['width_left_reference']['value'] == 94
    pcv.outputs.clear()
def test_plantcv_analyze_bound_vertical_neg_x():
    """A line position at the right edge triggers the negative-x branch."""
    # Clear previous outputs
    pcv.outputs.clear()
    # Test cache directory.
    # BUG FIX: this directory name is shared with the other analyze_bound_vertical
    # tests, so a plain os.mkdir raises FileExistsError when one of them runs first.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
    os.makedirs(cache_dir, exist_ok=True)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
    object_contours = contours_npz['arr_0']
    # Test with debug="plot", line position that will trigger -x
    pcv.params.debug = "plot"
    _ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=2454)
    assert pcv.outputs.observations['default']['width_left_reference']['value'] == 441
def test_plantcv_analyze_bound_vertical_small_x():
    """A line position at the left edge exercises the small-x branch."""
    # Clear previous outputs
    pcv.outputs.clear()
    # Test cache directory.
    # BUG FIX: this directory name is shared with the other analyze_bound_vertical
    # tests, so a plain os.mkdir raises FileExistsError when one of them runs first.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_bound_vertical")
    os.makedirs(cache_dir, exist_ok=True)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")
    object_contours = contours_npz['arr_0']
    # Test with debug='plot', line position that will trigger -x, and two channel object
    pcv.params.debug = "plot"
    _ = pcv.analyze_bound_vertical(img=img, obj=object_contours, mask=mask, line_position=1)
    assert pcv.outputs.observations['default']['width_right_reference']['value'] == 441
def test_plantcv_analyze_color():
    """Run analyze_color with every histogram plot type and check the hue median."""
    # Start from a clean outputs store
    pcv.outputs.clear()
    pcv.params.debug = None
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # First pass over the supported plot types
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type="all")
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type=None, label="prefix")
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type=None)
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type='lab')
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type='hsv')
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type=None)
    # Second pass (these runs historically used other debug modes, now disabled)
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type="all")
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type=None, label="prefix")
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type='lab')
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type='hsv')
    _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type='rgb')
    assert pcv.outputs.observations['default']['hue_median']['value'] == 84.0
def test_plantcv_analyze_color_incorrect_image():
    """A binary (single-channel) image is not a valid RGB input."""
    single_channel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    with pytest.raises(RuntimeError):
        _ = pcv.analyze_color(rgb_img=single_channel, mask=bin_mask, hist_plot_type=None)
#
#
def test_plantcv_analyze_color_bad_hist_type():
    """An unknown hist_plot_type must raise RuntimeError."""
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    pcv.params.debug = "plot"
    with pytest.raises(RuntimeError):
        _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type='bgr')
def test_plantcv_analyze_color_incorrect_hist_plot_type():
    """hist_plot_type "bgr" is rejected (debug set inside the raises block)."""
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    with pytest.raises(RuntimeError):
        pcv.params.debug = "plot"
        _ = pcv.analyze_color(rgb_img=rgb, mask=bin_mask, hist_plot_type="bgr")
def test_plantcv_analyze_nir():
    """analyze_nir_intensity should record one frequency per histogram bin."""
    # Start from a clean outputs store
    pcv.outputs.clear()
    pcv.params.debug = None
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    _ = pcv.analyze_nir_intensity(gray_img=gray, mask=bin_mask, bins=256, histplot=True)
    frequencies = pcv.outputs.observations['default']['nir_frequencies']['value']
    assert len(frequencies) == 256
def test_plantcv_analyze_nir_16bit():
    """analyze_nir_intensity also handles 16-bit grayscale input."""
    # Start from a clean outputs store
    pcv.outputs.clear()
    pcv.params.debug = None
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    _ = pcv.analyze_nir_intensity(gray_img=np.uint16(gray), mask=bin_mask, bins=256, histplot=True)
    frequencies = pcv.outputs.observations['default']['nir_frequencies']['value']
    assert len(frequencies) == 256
def test_plantcv_analyze_object():
    """analyze_object returns a non-empty set of analysis images for a real contour."""
    pcv.params.debug = None
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    contour = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")['arr_0']
    analysis_images = pcv.analyze_object(img=rgb, obj=contour, mask=bin_mask)
    pcv.outputs.clear()
    assert len(analysis_images) != 0
def test_plantcv_analyze_object_grayscale_input():
    """analyze_object also accepts a grayscale reference image."""
    pcv.params.debug = None
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    contour = np.load(os.path.join(TEST_DATA, TEST_INPUT_CONTOURS), encoding="latin1")['arr_0']
    analysis_images = pcv.analyze_object(img=gray, obj=contour, mask=bin_mask)
    assert len(analysis_images) != 1
def test_plantcv_analyze_object_zero_slope():
    """Degenerate contour: a 1-pixel-tall horizontal line (fitted line has zero slope)."""
    # Test with debug = None
    pcv.params.debug = None
    # Create a test image: a single bright row in the blue channel
    img = np.zeros((50, 50, 3), dtype=np.uint8)
    img[10:11, 10:40, 0] = 255
    mask = img[:, :, 0]
    # Hand-built contour tracing the line out and back along y == 10
    obj_contour = np.array([[[10, 10]], [[11, 10]], [[12, 10]], [[13, 10]], [[14, 10]], [[15, 10]], [[16, 10]],
                            [[17, 10]], [[18, 10]], [[19, 10]], [[20, 10]], [[21, 10]], [[22, 10]], [[23, 10]],
                            [[24, 10]], [[25, 10]], [[26, 10]], [[27, 10]], [[28, 10]], [[29, 10]], [[30, 10]],
                            [[31, 10]], [[32, 10]], [[33, 10]], [[34, 10]], [[35, 10]], [[36, 10]], [[37, 10]],
                            [[38, 10]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]], [[34, 10]],
                            [[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]], [[27, 10]],
                            [[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]], [[20, 10]],
                            [[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]], [[13, 10]],
                            [[12, 10]], [[11, 10]]], dtype=np.int32)
    obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
    assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2d():
    """Two disjoint squares in opposite corners: longest axis spans the full width."""
    # Test with debug = None
    pcv.params.debug = None
    # Create a test image: two small squares in the top corners of the blue channel
    img = np.zeros((50, 50, 3), dtype=np.uint8)
    img[0:5, 45:49, 0] = 255
    img[0:5, 0:5, 0] = 255
    mask = img[:, :, 0]
    # Hand-built contour outlining both squares
    obj_contour = np.array([[[45, 1]], [[45, 2]], [[45, 3]], [[45, 4]], [[46, 4]], [[47, 4]], [[48, 4]],
                            [[48, 3]], [[48, 2]], [[48, 1]], [[47, 1]], [[46, 1]], [[1, 1]], [[1, 2]],
                            [[1, 3]], [[1, 4]], [[2, 4]], [[3, 4]], [[4, 4]], [[4, 3]], [[4, 2]],
                            [[4, 1]], [[3, 1]], [[2, 1]]], dtype=np.int32)
    obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
    assert len(obj_images) != 0
def test_plantcv_analyze_object_longest_axis_2e():
    """A wide rectangle whose longest axis is horizontal."""
    # Test with debug = None
    pcv.params.debug = None
    # Create a test image: a 5x30 rectangle in the blue channel
    img = np.zeros((50, 50, 3), dtype=np.uint8)
    img[10:15, 10:40, 0] = 255
    mask = img[:, :, 0]
    # Hand-built contour tracing the rectangle boundary clockwise
    obj_contour = np.array([[[10, 10]], [[10, 11]], [[10, 12]], [[10, 13]], [[10, 14]], [[11, 14]], [[12, 14]],
                            [[13, 14]], [[14, 14]], [[15, 14]], [[16, 14]], [[17, 14]], [[18, 14]], [[19, 14]],
                            [[20, 14]], [[21, 14]], [[22, 14]], [[23, 14]], [[24, 14]], [[25, 14]], [[26, 14]],
                            [[27, 14]], [[28, 14]], [[29, 14]], [[30, 14]], [[31, 14]], [[32, 14]], [[33, 14]],
                            [[34, 14]], [[35, 14]], [[36, 14]], [[37, 14]], [[38, 14]], [[39, 14]], [[39, 13]],
                            [[39, 12]], [[39, 11]], [[39, 10]], [[38, 10]], [[37, 10]], [[36, 10]], [[35, 10]],
                            [[34, 10]], [[33, 10]], [[32, 10]], [[31, 10]], [[30, 10]], [[29, 10]], [[28, 10]],
                            [[27, 10]], [[26, 10]], [[25, 10]], [[24, 10]], [[23, 10]], [[22, 10]], [[21, 10]],
                            [[20, 10]], [[19, 10]], [[18, 10]], [[17, 10]], [[16, 10]], [[15, 10]], [[14, 10]],
                            [[13, 10]], [[12, 10]], [[11, 10]]], dtype=np.int32)
    obj_images = pcv.analyze_object(img=img, obj=obj_contour, mask=mask)
    assert len(obj_images) != 0
def test_plantcv_analyze_object_small_contour():
    """Contours too small to analyze should yield None."""
    pcv.params.debug = None
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # A 50x50 square is tiny relative to the 2056x2454 image
    tiny_contour = [np.array([[[0, 0]], [[0, 50]], [[50, 50]], [[50, 0]]], dtype=np.int32)]
    analysis_images = pcv.analyze_object(img=rgb, obj=tiny_contour, mask=bin_mask)
    assert analysis_images is None
def test_plantcv_analyze_thermal_values():
    """analyze_thermal_values should produce a histogram and a known median temp."""
    # Start from a clean outputs store
    pcv.outputs.clear()
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_thermal_values")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_THERMAL_IMG_MASK), -1)
    thermal_data = np.load(os.path.join(TEST_DATA, TEST_THERMAL_ARRAY), encoding="latin1")['arr_0']
    pcv.params.debug = None
    thermal_hist = pcv.analyze_thermal_values(thermal_array=thermal_data, mask=mask, histplot=True)
    assert thermal_hist is not None
    assert pcv.outputs.observations['default']['median_temp']['value'] == 33.20922
def test_plantcv_apply_mask_white():
    """apply_mask with mask_color="white" preserves the input dimensions."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_white")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.apply_mask(img=img, mask=mask, mask_color="white")
    # Test with debug = None
    pcv.params.debug = None
    masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="white")
    # BUG FIX: the original wrapped each comparison in a one-element list
    # (all([i == j] for ...)), which is always truthy, so the assert was
    # vacuously True. Compare the dimensions directly.
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_black():
    """apply_mask with mask_color="black" preserves the input dimensions."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_black")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.apply_mask(img=img, mask=mask, mask_color="black")
    # Test with debug = None
    pcv.params.debug = None
    masked_img = pcv.apply_mask(img=img, mask=mask, mask_color="black")
    # BUG FIX: all([i == j] for ...) was always True because each element is
    # a non-empty list. Compare the dimensions directly.
    assert all(i == j for i, j in zip(np.shape(masked_img), TEST_COLOR_DIM))
def test_plantcv_apply_mask_hyperspectral():
    """apply_mask works on multi-band (hyperspectral) array data."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_apply_mask_hyperspectral")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Load a hyperspectral cube plus a synthetic all-ones mask/stack
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    spectral_cube = pcv.hyperspectral.read_data(filename=spectral_filename)
    ones = np.ones((2056, 2454))
    ones_stack = cv2.merge((ones, ones, ones, ones))
    # Exercise the "print" debug path on the stacked synthetic image
    pcv.params.debug = "print"
    _ = pcv.apply_mask(img=ones_stack, mask=ones, mask_color="black")
    # Exercise the "plot" debug path on the real hyperspectral data
    pcv.params.debug = "plot"
    masked = pcv.apply_mask(img=spectral_cube.array_data, mask=ones, mask_color="black")
    assert np.mean(masked) == 13.97111260224949
def test_plantcv_apply_mask_bad_input():
    """apply_mask raises RuntimeError for an unrecognized mask_color."""
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    with pytest.raises(RuntimeError):
        pcv.params.debug = "plot"
        # "wite" is a deliberately invalid color name
        _ = pcv.apply_mask(img=rgb, mask=bin_mask, mask_color="wite")
def test_plantcv_auto_crop():
    """auto_crop with modest padding yields a smaller image than the input."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Multi-plant image plus its saved object contours
    multi_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
    contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
    roi_contours = [contour_npz[key] for key in contour_npz]
    # "print" debug path, asymmetric padding
    pcv.params.debug = "print"
    _ = pcv.auto_crop(img=multi_img, obj=roi_contours[1], padding_x=(20, 10), padding_y=(20, 10), color='black')
    # "plot" debug path, including padding larger than the image
    pcv.params.debug = "plot"
    _ = pcv.auto_crop(img=multi_img, obj=roi_contours[1], color='image')
    _ = pcv.auto_crop(img=multi_img, obj=roi_contours[1], padding_x=2000, padding_y=2000, color='image')
    # Final run with debug disabled
    pcv.params.debug = None
    cropped = pcv.auto_crop(img=multi_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='black')
    # The cropped result must be shorter than the source image
    assert np.shape(multi_img)[0] > np.shape(cropped)[0]
def test_plantcv_auto_crop_grayscale_input():
    """auto_crop also handles single-channel (grayscale) input."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_auto_crop_grayscale_input")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Convert the multi-plant image to grayscale
    rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
    gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
    contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
    roi_contours = [contour_npz[key] for key in contour_npz]
    pcv.params.debug = "plot"
    cropped = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='white')
    # Cropping must reduce the image height
    assert np.shape(gray_img)[0] > np.shape(cropped)[0]
def test_plantcv_auto_crop_bad_color_input():
    """auto_crop rejects an invalid color name with RuntimeError."""
    rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
    gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
    contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
    roi_contours = [contour_npz[key] for key in contour_npz]
    with pytest.raises(RuntimeError):
        # "wite" is a deliberately invalid color name
        _ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x=20, padding_y=20, color='wite')
def test_plantcv_auto_crop_bad_padding_input():
    """auto_crop rejects non-numeric padding with RuntimeError."""
    rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
    gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
    contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
    roi_contours = [contour_npz[key] for key in contour_npz]
    with pytest.raises(RuntimeError):
        # A string is not an acceptable padding value
        _ = pcv.auto_crop(img=gray_img, obj=roi_contours[1], padding_x="one", padding_y=20, color='white')
def test_plantcv_canny_edge_detect():
    """canny_edge_detect output matches input dimensions and is binary."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.canny_edge_detect(img=rgb_img, mask=mask, mask_color='white')
    _ = pcv.canny_edge_detect(img=img, mask=mask, mask_color='black')
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.canny_edge_detect(img=img, thickness=2)
    _ = pcv.canny_edge_detect(img=img)
    # Test with debug = None
    pcv.params.debug = None
    edge_img = pcv.canny_edge_detect(img=img)
    # BUG FIX: the original used all([i == j] for ...), which is always True
    # because each generated element is a non-empty list, so both nested
    # checks were vacuous. Assert the real conditions directly.
    # Output image has the dimensions of the input image:
    assert all(i == j for i, j in zip(np.shape(edge_img), TEST_BINARY_DIM))
    # Output image is binary (only 0 and 255 present):
    assert all(i == j for i, j in zip(np.unique(edge_img), [0, 255]))
def test_plantcv_canny_edge_detect_bad_input():
    """canny_edge_detect rejects an unsupported mask_color."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_canny_edge_detect")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    with pytest.raises(RuntimeError):
        # Only 'white' and 'black' are valid mask colors
        _ = pcv.canny_edge_detect(img=gray, mask=bin_mask, mask_color="gray")
def test_plantcv_closing():
    """Morphological closing on gray and binary inputs; checksum the result."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_closing")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Grayscale and binary variants of the test imagery
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
    gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
    binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Exercise each debug mode in turn
    pcv.params.debug = None
    _ = pcv.closing(gray)
    pcv.params.debug = 'plot'
    _ = pcv.closing(binary, np.ones((4, 4), np.uint8))
    pcv.params.debug = 'print'
    filtered = pcv.closing(binary)
    # Known pixel-sum for the closed binary test image
    assert np.sum(filtered) == 16261860
def test_plantcv_closing_bad_input():
    """closing rejects RGB input (expects grayscale or binary)."""
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
    with pytest.raises(RuntimeError):
        _ = pcv.closing(rgb)
def test_plantcv_cluster_contours():
    """cluster_contours groups many contours into fewer clusters."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Multi-plant image plus saved contours and hierarchy
    multi_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
    roi_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
    hier_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
    roi_objs = [roi_npz[key] for key in roi_npz]
    obj_hierarchy = hier_npz['arr_0']
    # "print" debug path, with and without the grid overlay
    pcv.params.debug = "print"
    _ = pcv.cluster_contours(img=multi_img, roi_objects=roi_objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
    _ = pcv.cluster_contours(img=multi_img, roi_objects=roi_objs, roi_obj_hierarchy=obj_hierarchy, show_grid=True)
    # "plot" debug path
    pcv.params.debug = "plot"
    _ = pcv.cluster_contours(img=multi_img, roi_objects=roi_objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
    # Real run with debug disabled
    pcv.params.debug = None
    clusters_i, contours, hierarchy = pcv.cluster_contours(img=multi_img, roi_objects=roi_objs,
                                                           roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
    # Clustering must reduce the object count
    assert len(roi_objs) > len(clusters_i)
def test_plantcv_cluster_contours_grayscale_input():
    """cluster_contours also accepts a grayscale image."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_grayscale_input")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Load the multi-plant image directly as grayscale (flag 0)
    gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
    roi_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_OBJECT), encoding="latin1")
    hier_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
    roi_objs = [roi_npz[key] for key in roi_npz]
    obj_hierarchy = hier_npz['arr_0']
    # "print" debug path
    pcv.params.debug = "print"
    _ = pcv.cluster_contours(img=gray_img, roi_objects=roi_objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
    # "plot" debug path
    pcv.params.debug = "plot"
    _ = pcv.cluster_contours(img=gray_img, roi_objects=roi_objs, roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
    # Real run with debug disabled
    pcv.params.debug = None
    clusters_i, contours, hierarchy = pcv.cluster_contours(img=gray_img, roi_objects=roi_objs,
                                                           roi_obj_hierarchy=obj_hierarchy, nrow=4, ncol=6)
    # Clustering must reduce the object count
    assert len(roi_objs) > len(clusters_i)
def test_plantcv_cluster_contours_splitimg():
    """cluster_contour_splitimg runs under many option combinations and returns output paths."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Multi-plant image, its contours, the saved clustering, and name files
    multi_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
    contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
    cluster_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
    hier_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
    names_file = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
    names_file_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
    roi_contours = [contour_npz[key] for key in contour_npz]
    cluster_contours = [cluster_npz[key] for key in cluster_npz]
    obj_hierarchy = hier_npz['arr_0']
    pcv.params.debug = None
    # Output directory given, no file prefix or name list
    _, _, _ = pcv.cluster_contour_splitimg(img=multi_img, grouped_contour_indexes=cluster_contours,
                                           contours=roi_contours, hierarchy=obj_hierarchy,
                                           outdir=cache_dir, file=None, filenames=None)
    # Degenerate clustering with an empty contour list
    _, _, _ = pcv.cluster_contour_splitimg(img=multi_img, grouped_contour_indexes=[[0]], contours=[],
                                           hierarchy=np.array([[[1, -1, -1, -1]]]))
    # Output directory plus a file prefix
    _, _, _ = pcv.cluster_contour_splitimg(img=multi_img, grouped_contour_indexes=cluster_contours,
                                           contours=roi_contours, hierarchy=obj_hierarchy,
                                           outdir=cache_dir, file='multi', filenames=None)
    # Name file matching the cluster count
    _, _, _ = pcv.cluster_contour_splitimg(img=multi_img, grouped_contour_indexes=cluster_contours,
                                           contours=roi_contours, hierarchy=obj_hierarchy,
                                           outdir=None, file=None, filenames=names_file)
    # Name file with too many entries
    _, _, _ = pcv.cluster_contour_splitimg(img=multi_img, grouped_contour_indexes=cluster_contours,
                                           contours=roi_contours, hierarchy=obj_hierarchy,
                                           outdir=None, file=None, filenames=names_file_too_many)
    output_path, imgs, masks = pcv.cluster_contour_splitimg(img=multi_img, grouped_contour_indexes=cluster_contours,
                                                            contours=roi_contours, hierarchy=obj_hierarchy,
                                                            outdir=None, file=None, filenames=None)
    assert len(output_path) != 0
def test_plantcv_cluster_contours_splitimg_grayscale():
    """cluster_contour_splitimg also accepts a grayscale source image."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_cluster_contours_splitimg_grayscale")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Load the multi-plant image directly as grayscale (flag 0)
    gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), 0)
    contour_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_CONTOUR), encoding="latin1")
    cluster_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_ClUSTER_CONTOUR), encoding="latin1")
    hier_npz = np.load(os.path.join(TEST_DATA, TEST_INPUT_MULTI_HIERARCHY), encoding="latin1")
    names_file = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT)
    names_file_too_many = os.path.join(TEST_DATA, TEST_INPUT_GENOTXT_TOO_MANY)
    roi_contours = [contour_npz[key] for key in contour_npz]
    cluster_contours = [cluster_npz[key] for key in cluster_npz]
    obj_hierarchy = hier_npz['arr_0']
    pcv.params.debug = None
    output_path, imgs, masks = pcv.cluster_contour_splitimg(img=gray_img, grouped_contour_indexes=cluster_contours,
                                                            contours=roi_contours, hierarchy=obj_hierarchy,
                                                            outdir=None, file=None, filenames=None)
    assert len(output_path) != 0
def test_plantcv_color_palette():
    """color_palette returns the requested number of RGB triples."""
    palette = pcv.color_palette(num=10, saved=False)
    assert np.shape(palette) == (10, 3)
def test_plantcv_color_palette_random():
    """color_palette in "random" sequence mode still yields n RGB triples."""
    pcv.params.color_sequence = "random"
    palette = pcv.color_palette(num=10, saved=False)
    assert np.shape(palette) == (10, 3)
def test_plantcv_color_palette_saved():
    """color_palette with saved=True returns the stored color scale."""
    saved_scale = [[0, 0, 0], [255, 255, 255]]
    pcv.params.saved_color_scale = saved_scale
    assert pcv.color_palette(num=2, saved=True) == saved_scale
def test_plantcv_crop():
    """crop returns exactly the requested h x w window."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    gray, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
    # Exercise both debug modes
    pcv.params.debug = "print"
    _ = pcv.crop(img=gray, x=10, y=10, h=50, w=50)
    pcv.params.debug = "plot"
    cropped = pcv.crop(img=gray, x=10, y=10, h=50, w=50)
    assert np.shape(cropped) == (50, 50)
def test_plantcv_crop_hyperspectral():
    """crop preserves all bands of a multi-channel (stacked) image."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_hyperspectral")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Build a synthetic 4-band image from an all-ones plane
    ones = np.ones((2056, 2454))
    stacked = cv2.merge((ones, ones, ones, ones))
    # Exercise both debug modes
    pcv.params.debug = "print"
    _ = pcv.crop(img=stacked, x=10, y=10, h=50, w=50)
    pcv.params.debug = "plot"
    cropped = pcv.crop(img=stacked, x=10, y=10, h=50, w=50)
    assert np.shape(cropped) == (50, 50, 4)
def test_plantcv_crop_position_mask():
    """crop_position_mask positions a mask on a grayscale image; checksum the result."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # NIR image plus several mask variants (plain, 3-channel, resized)
    nir, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), 'gray')
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
    mask_three_channel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
    mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE), -1)
    # "print" debug path across the mask variants
    pcv.params.debug = "print"
    _ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
    _ = pcv.crop_position_mask(nir, mask_resize, x=40, y=3, v_pos="top", h_pos="right")
    _ = pcv.crop_position_mask(nir, mask_three_channel, x=40, y=3, v_pos="top", h_pos="right")
    # "print" debug path anchored at the bottom-left
    _ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
    # "plot" debug path, top-right then bottom-left
    pcv.params.debug = "plot"
    _ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
    _ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
    # Final run with debug disabled
    pcv.params.debug = None
    newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
    # Known pixel-sum for this positioning
    assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_color():
    """crop_position_mask positions a mask on a color image; checksum the result."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Color image plus plain, resized, and non-binary mask variants
    nir, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
    mask_resize = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_RESIZE))
    mask_non_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK))
    # "print" debug path, top-right then bottom-left
    pcv.params.debug = "print"
    _ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
    _ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="bottom", h_pos="left")
    # "plot" debug path across anchor positions and mask variants
    pcv.params.debug = "plot"
    _ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
    _ = pcv.crop_position_mask(nir, mask, x=45, y=2, v_pos="bottom", h_pos="left")
    _ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="left")
    _ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="top", h_pos="left")
    _ = pcv.crop_position_mask(nir, mask_non_binary, x=45, y=2, v_pos="bottom", h_pos="right")
    _ = pcv.crop_position_mask(nir, mask_resize, x=45, y=2, v_pos="top", h_pos="left")
    # Final run with debug disabled
    pcv.params.debug = None
    newmask = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="right")
    # Known pixel-sum for this positioning
    assert np.sum(newmask) == 707115
def test_plantcv_crop_position_mask_bad_input_x():
    """crop_position_mask rejects negative offsets with RuntimeError."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
    nir, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
    pcv.params.debug = None
    with pytest.raises(RuntimeError):
        _ = pcv.crop_position_mask(nir, mask, x=-1, y=-1, v_pos="top", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_vpos():
    """crop_position_mask rejects an invalid v_pos with RuntimeError."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
    nir, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
    pcv.params.debug = None
    with pytest.raises(RuntimeError):
        # Only "top" and "bottom" are valid vertical anchors
        _ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="below", h_pos="right")
def test_plantcv_crop_position_mask_bad_input_hpos():
    """crop_position_mask rejects an invalid h_pos with RuntimeError."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_crop_position_mask")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
    nir, _, _ = pcv.readimage(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK))
    pcv.params.debug = None
    with pytest.raises(RuntimeError):
        # Only "left" and "right" are valid horizontal anchors
        _ = pcv.crop_position_mask(nir, mask, x=40, y=3, v_pos="top", h_pos="starboard")
def test_plantcv_dilate():
    """dilate keeps the input dimensions and a binary value set."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_dilate")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.dilate(gray_img=img, ksize=5, i=1)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.dilate(gray_img=img, ksize=5, i=1)
    # Test with debug = None
    pcv.params.debug = None
    dilate_img = pcv.dilate(gray_img=img, ksize=5, i=1)
    # BUG FIX: the original used all([i == j] for ...), which is always True
    # because each element is a non-empty list, so both nested checks were
    # vacuous. Assert the real conditions directly.
    # Output image has the dimensions of the input image:
    assert all(i == j for i, j in zip(np.shape(dilate_img), TEST_BINARY_DIM))
    # Output image is binary (only 0 and 255 present):
    assert all(i == j for i, j in zip(np.unique(dilate_img), [0, 255]))
def test_plantcv_dilate_small_k():
    """dilate rejects kernel sizes below the minimum with ValueError."""
    binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    pcv.params.debug = None
    with pytest.raises(ValueError):
        _ = pcv.dilate(binary, 1, 1)
def test_plantcv_erode():
    """erode keeps the input dimensions and a binary value set."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_erode")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.erode(gray_img=img, ksize=5, i=1)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.erode(gray_img=img, ksize=5, i=1)
    # Test with debug = None
    pcv.params.debug = None
    erode_img = pcv.erode(gray_img=img, ksize=5, i=1)
    # BUG FIX: the original used all([i == j] for ...), which is always True
    # because each element is a non-empty list, so both nested checks were
    # vacuous. Assert the real conditions directly.
    # Output image has the dimensions of the input image:
    assert all(i == j for i, j in zip(np.shape(erode_img), TEST_BINARY_DIM))
    # Output image is binary (only 0 and 255 present):
    assert all(i == j for i, j in zip(np.unique(erode_img), [0, 255]))
def test_plantcv_erode_small_k():
    """erode rejects kernel sizes below the minimum with ValueError."""
    binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    pcv.params.debug = None
    with pytest.raises(ValueError):
        _ = pcv.erode(binary, 1, 1)
def test_plantcv_distance_transform():
    """distance_transform output matches the input mask's dimensions."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_distance_transform")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
    # Test with debug = None
    pcv.params.debug = None
    distance_transform_img = pcv.distance_transform(bin_img=mask, distance_type=1, mask_size=3)
    # BUG FIX: all([i == j] for ...) was always True because each element is
    # a non-empty list. Compare the dimensions directly.
    assert all(i == j for i, j in zip(np.shape(distance_transform_img), np.shape(mask)))
def test_plantcv_fatal_error():
    """fatal_error raises RuntimeError for any message."""
    with pytest.raises(RuntimeError):
        pcv.fatal_error("Test error")
def test_plantcv_fill():
    """fill removes every object below the size threshold, leaving an empty mask."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Exercise the "print" debug path
    pcv.params.debug = "print"
    _ = pcv.fill(bin_img=binary, size=63632)
    # Exercise the "plot" debug path
    pcv.params.debug = "plot"
    _ = pcv.fill(bin_img=binary, size=63632)
    # Real run with debug disabled
    pcv.params.debug = None
    fill_img = pcv.fill(bin_img=binary, size=63632)
    # The threshold exceeds every object in the test image, so nothing remains
    assert np.sum(fill_img) == 0
def test_plantcv_fill_bad_input():
    """fill rejects non-binary (grayscale) input with RuntimeError."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_bad_input")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    with pytest.raises(RuntimeError):
        _ = pcv.fill(bin_img=gray, size=1)
def test_plantcv_fill_holes():
    """fill_holes only adds pixels, so the pixel sum strictly grows."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Exercise both debug paths
    pcv.params.debug = "print"
    _ = pcv.fill_holes(bin_img=binary)
    pcv.params.debug = "plot"
    _ = pcv.fill_holes(bin_img=binary)
    # Real run with debug disabled
    pcv.params.debug = None
    filled = pcv.fill_holes(bin_img=binary)
    assert np.sum(filled) > np.sum(binary)
def test_plantcv_fill_holes_bad_input():
    """fill_holes rejects non-binary (grayscale) input with RuntimeError."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_fill_holes_bad_input")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    with pytest.raises(RuntimeError):
        _ = pcv.fill_holes(bin_img=gray)
def test_plantcv_find_objects():
    """find_objects locates the expected number of contours."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Color image plus its binary segmentation mask
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Run once per debug mode
    pcv.params.debug = "print"
    _ = pcv.find_objects(img=rgb, mask=bin_mask)
    pcv.params.debug = "plot"
    _ = pcv.find_objects(img=rgb, mask=bin_mask)
    pcv.params.debug = None
    contours, hierarchy = pcv.find_objects(img=rgb, mask=bin_mask)
    # The mask contains exactly two objects
    assert len(contours) == 2
def test_plantcv_find_objects_grayscale_input():
    """find_objects also accepts a grayscale image."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_find_objects_grayscale_input")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Load the color test image directly as grayscale (flag 0)
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    pcv.params.debug = "plot"
    contours, hierarchy = pcv.find_objects(img=gray, mask=bin_mask)
    # The mask contains exactly two objects
    assert len(contours) == 2
def test_plantcv_flip():
    """flip preserves the dimensions of the input image."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_flip")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    img_binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.flip(img=img, direction="horizontal")
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.flip(img=img, direction="vertical")
    _ = pcv.flip(img=img_binary, direction="vertical")
    # Test with debug = None
    pcv.params.debug = None
    flipped_img = pcv.flip(img=img, direction="horizontal")
    # BUG FIX: all([i == j] for ...) was always True because each element is
    # a non-empty list. Compare the dimensions directly.
    assert all(i == j for i, j in zip(np.shape(flipped_img), TEST_COLOR_DIM))
def test_plantcv_flip_bad_input():
    """flip rejects an unrecognized direction with RuntimeError."""
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    pcv.params.debug = None
    with pytest.raises(RuntimeError):
        # Only "horizontal" / "vertical" are accepted
        _ = pcv.flip(img=rgb, direction="vert")
def test_plantcv_gaussian_blur():
    """gaussian_blur shifts the mean pixel value of a binary image."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_gaussian_blur")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Binary and color test images
    binary = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    # "print" debug path
    pcv.params.debug = "print"
    _ = pcv.gaussian_blur(img=binary, ksize=(51, 51), sigma_x=0, sigma_y=None)
    # "plot" debug path, binary and color inputs
    pcv.params.debug = "plot"
    _ = pcv.gaussian_blur(img=binary, ksize=(51, 51), sigma_x=0, sigma_y=None)
    _ = pcv.gaussian_blur(img=color, ksize=(51, 51), sigma_x=0, sigma_y=None)
    # Real run with debug disabled
    pcv.params.debug = None
    blurred = pcv.gaussian_blur(img=binary, ksize=(51, 51), sigma_x=0, sigma_y=None)
    # A 51x51 blur must change the average of a hard-edged binary image
    assert np.average(blurred) != np.average(binary)
def test_plantcv_get_kernel_cross():
    """A 3x3 cross kernel has ones along the center row and column."""
    expected = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    assert (pcv.get_kernel(size=(3, 3), shape="cross") == expected).all()
def test_plantcv_get_kernel_rectangle():
    """A 3x3 rectangle kernel is all ones."""
    expected = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
    assert (pcv.get_kernel(size=(3, 3), shape="rectangle") == expected).all()
def test_plantcv_get_kernel_ellipse():
    """At 3x3, an ellipse kernel degenerates to the cross pattern."""
    expected = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    assert (pcv.get_kernel(size=(3, 3), shape="ellipse") == expected).all()
def test_plantcv_get_kernel_bad_input_size():
    """get_kernel rejects a 1x1 kernel size with ValueError."""
    with pytest.raises(ValueError):
        _ = pcv.get_kernel(size=(1, 1), shape="ellipse")
def test_plantcv_get_kernel_bad_input_shape():
    """get_kernel rejects an unknown shape name with RuntimeError."""
    with pytest.raises(RuntimeError):
        _ = pcv.get_kernel(size=(3, 1), shape="square")
def test_plantcv_get_nir_sv():
    """get_nir resolves the side-view NIR image matching a VIS image."""
    expected = os.path.join(TEST_DATA, TEST_NIR)
    assert pcv.get_nir(TEST_DATA, TEST_VIS) == expected
def test_plantcv_get_nir_tv():
    """get_nir resolves the top-view NIR image matching a VIS image."""
    expected = os.path.join(TEST_DATA, TEST_NIR_TV)
    assert pcv.get_nir(TEST_DATA, TEST_VIS_TV) == expected
def test_plantcv_hist_equalization():
    """hist_equalization changes the mean value of a grayscale image."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Exercise both debug paths
    pcv.params.debug = "print"
    _ = pcv.hist_equalization(gray_img=gray)
    pcv.params.debug = "plot"
    _ = pcv.hist_equalization(gray_img=gray)
    # Real run with debug disabled
    pcv.params.debug = None
    equalized = pcv.hist_equalization(gray_img=gray)
    # Equalization redistributes intensities, shifting the average
    assert np.average(equalized) != np.average(gray)
def test_plantcv_hist_equalization_bad_input():
    """hist_equalization rejects a 3-channel image with RuntimeError."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hist_equalization_bad_input")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Flag 1 forces a color (3-channel) read, which is invalid here
    color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), 1)
    pcv.params.debug = None
    with pytest.raises(RuntimeError):
        _ = pcv.hist_equalization(gray_img=color)
def test_plantcv_image_add():
    """image_add preserves the dimensions of the input images."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_add")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    img2 = np.copy(img1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.image_add(gray_img1=img1, gray_img2=img2)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.image_add(gray_img1=img1, gray_img2=img2)
    # Test with debug = None
    pcv.params.debug = None
    added_img = pcv.image_add(gray_img1=img1, gray_img2=img2)
    # BUG FIX: all([i == j] for ...) was always True because each element is
    # a non-empty list. Compare the dimensions directly.
    assert all(i == j for i, j in zip(np.shape(added_img), TEST_BINARY_DIM))
def test_plantcv_image_subtract():
    """Subtracting an image from its copy yields an all-zero image."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_image_sub")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # A binary image and an identical copy
    original = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    duplicate = np.copy(original)
    # Exercise both debug paths
    pcv.params.debug = 'print'
    _ = pcv.image_subtract(original, duplicate)
    pcv.params.debug = 'plot'
    _ = pcv.image_subtract(original, duplicate)
    # Real run with debug disabled
    pcv.params.debug = None
    diff = pcv.image_subtract(original, duplicate)
    assert np.array_equal(diff, np.zeros(np.shape(diff), np.uint8))
def test_plantcv_image_subtract_fail():
    """image_subtract rejects operands with mismatched channel counts."""
    # Same file read as single-channel and as 3-channel
    single_channel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    three_channel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY))
    with pytest.raises(RuntimeError):
        _ = pcv.image_subtract(single_channel, three_channel)
def test_plantcv_invert():
    """invert keeps the input dimensions and a binary value set."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_invert")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.invert(gray_img=img)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.invert(gray_img=img)
    # Test with debug = None
    pcv.params.debug = None
    inverted_img = pcv.invert(gray_img=img)
    # BUG FIX: the original used all([i == j] for ...), which is always True
    # because each element is a non-empty list, so both nested checks were
    # vacuous. Assert the real conditions directly.
    # Output image has the dimensions of the input image:
    assert all(i == j for i, j in zip(np.shape(inverted_img), TEST_BINARY_DIM))
    # Output image is binary (only 0 and 255 present):
    assert all(i == j for i, j in zip(np.unique(inverted_img), [0, 255]))
def test_plantcv_landmark_reference_pt_dist():
    """landmark_reference_pt_dist records 8 observations under the given label."""
    # Start from a clean observations store
    pcv.outputs.clear()
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_landmark_reference")
    os.mkdir(cache_dir)
    # Rescaled landmark points plus centroid/baseline reference points
    points_rescaled = [(0.0139, 0.2569), (0.2361, 0.2917), (0.3542, 0.3819), (0.3542, 0.4167), (0.375, 0.4236),
                       (0.7431, 0.3681), (0.8958, 0.3542), (0.9931, 0.3125), (0.1667, 0.5139), (0.4583, 0.8889),
                       (0.4931, 0.5903), (0.3889, 0.5694), (0.4792, 0.4306), (0.2083, 0.5417), (0.3194, 0.5278),
                       (0.3889, 0.375), (0.3681, 0.3472), (0.2361, 0.0139), (0.5417, 0.2292), (0.7708, 0.3472),
                       (0.6458, 0.3472), (0.6389, 0.5208), (0.6458, 0.625)]
    centroid_rescaled = (0.4685, 0.4945)
    bottomline_rescaled = (0.4685, 0.2569)
    # Degenerate inputs: non-numeric centroid, single point, empty point list
    _ = pcv.landmark_reference_pt_dist(points_r=[], centroid_r=('a', 'b'), bline_r=(0, 0))
    _ = pcv.landmark_reference_pt_dist(points_r=[(10, 1000)], centroid_r=(10, 10), bline_r=(10, 10))
    _ = pcv.landmark_reference_pt_dist(points_r=[], centroid_r=(0, 0), bline_r=(0, 0))
    # Full input set under a custom label
    _ = pcv.landmark_reference_pt_dist(points_r=points_rescaled, centroid_r=centroid_rescaled,
                                       bline_r=bottomline_rescaled, label="prefix")
    assert len(pcv.outputs.observations['prefix'].keys()) == 8
def test_plantcv_laplace_filter():
    """laplace_filter preserves the input image's dimensions."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_laplace_filter")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
    # Test with debug = None
    pcv.params.debug = None
    lp_img = pcv.laplace_filter(gray_img=img, ksize=1, scale=1)
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the assertion could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(lp_img), TEST_GRAY_DIM))
def test_plantcv_logical_and():
    """logical_and of two binary images preserves their dimensions."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_and")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    img2 = np.copy(img1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.logical_and(bin_img1=img1, bin_img2=img2)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.logical_and(bin_img1=img1, bin_img2=img2)
    # Test with debug = None
    pcv.params.debug = None
    and_img = pcv.logical_and(bin_img1=img1, bin_img2=img2)
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the assertion could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(and_img), TEST_BINARY_DIM))
def test_plantcv_logical_or():
    """logical_or of two binary images preserves their dimensions."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_or")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    img2 = np.copy(img1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.logical_or(bin_img1=img1, bin_img2=img2)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.logical_or(bin_img1=img1, bin_img2=img2)
    # Test with debug = None
    pcv.params.debug = None
    or_img = pcv.logical_or(bin_img1=img1, bin_img2=img2)
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the assertion could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(or_img), TEST_BINARY_DIM))
def test_plantcv_logical_xor():
    """logical_xor of two binary images preserves their dimensions."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_logical_xor")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img1 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    img2 = np.copy(img1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
    # Test with debug = None
    pcv.params.debug = None
    xor_img = pcv.logical_xor(bin_img1=img1, bin_img2=img2)
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the assertion could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(xor_img), TEST_BINARY_DIM))
def test_plantcv_median_blur():
    """median_blur keeps the input's dimensions and binary value set."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_median_blur")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.median_blur(gray_img=img, ksize=5)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.median_blur(gray_img=img, ksize=5)
    # Test with debug = None
    pcv.params.debug = None
    blur_img = pcv.median_blur(gray_img=img, ksize=5)
    # Bug fix: the original conditions used all([i == j] for ...), which
    # iterates over one-element lists that are always truthy, so the test
    # always passed. Assert the element-wise comparisons directly.
    # Output must match the input dimensions...
    assert all(i == j for i, j in zip(np.shape(blur_img), TEST_BINARY_DIM))
    # ...and stay binary (values 0 and 255 only)
    assert all(i == j for i, j in zip(np.unique(blur_img), [0, 255]))
def test_plantcv_median_blur_bad_input():
    """median_blur rejects a non-integer kernel size."""
    # Per-test cache directory for debug output
    debug_dir = os.path.join(TEST_TMPDIR, "test_plantcv_median_blur_bad_input")
    os.mkdir(debug_dir)
    pcv.params.debug_outdir = debug_dir
    # Load the grayscale test image
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # A float ksize must raise
    with pytest.raises(RuntimeError):
        _ = pcv.median_blur(gray, 5.)
def test_plantcv_naive_bayes_classifier():
    """naive_bayes_classifier returns a binary mask of the input's dimensions."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_naive_bayes_classifier")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
    # Test with debug = None
    pcv.params.debug = None
    mask = pcv.naive_bayes_classifier(rgb_img=img, pdf_file=os.path.join(TEST_DATA, TEST_PDFS))
    # Bug fix: the original conditions used all([i == j] for ...), which
    # iterates over always-truthy one-element lists, so the test always
    # passed. Assert the element-wise comparisons directly.
    # Mask must match the grayscale dimensions...
    assert all(i == j for i, j in zip(np.shape(mask), TEST_GRAY_DIM))
    # ...and be binary (values 0 and 255 only)
    assert all(i == j for i, j in zip(np.unique(mask), [0, 255]))
def test_plantcv_naive_bayes_classifier_bad_input():
    """naive_bayes_classifier raises on a malformed PDF file."""
    # Load the color test image
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    pcv.params.debug = None
    # A bad probability-density file must raise
    with pytest.raises(RuntimeError):
        _ = pcv.naive_bayes_classifier(rgb_img=rgb, pdf_file=os.path.join(TEST_DATA, TEST_PDFS_BAD))
def test_plantcv_object_composition():
    """object_composition merges multiple contours into a single object."""
    # Per-test cache directory for debug output
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_object_composition")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Load the test image plus its stored contours and hierarchy
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    contours_file = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
    object_contours = [contours_file[key] for key in contours_file]
    hierarchy_file = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
    object_hierarchy = hierarchy_file['arr_0']
    # Debug "print" mode, including the empty-contour-list path
    pcv.params.debug = "print"
    _ = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
    _ = pcv.object_composition(img=img, contours=[], hierarchy=object_hierarchy)
    # Debug "plot" mode
    pcv.params.debug = "plot"
    _ = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
    # No debug: capture the composed result
    pcv.params.debug = None
    contours, mask = pcv.object_composition(img=img, contours=object_contours, hierarchy=object_hierarchy)
    # A single combined object is expected
    assert np.shape(contours)[1] == 1
def test_plantcv_object_composition_grayscale_input():
    """object_composition also accepts a grayscale background image."""
    # Per-test cache directory for debug output
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_object_composition_grayscale_input")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Load the test image as grayscale plus its stored contours and hierarchy
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
    contours_file = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
    object_contours = [contours_file[key] for key in contours_file]
    hierarchy_file = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
    object_hierarchy = hierarchy_file['arr_0']
    # Run under debug "plot" and capture the composed result
    pcv.params.debug = "plot"
    contours, mask = pcv.object_composition(img=gray, contours=object_contours, hierarchy=object_hierarchy)
    # A single combined object is expected
    assert np.shape(contours)[1] == 1
def test_plantcv_within_frame():
    """within_frame reports whether a mask touches the image border."""
    # Per-test cache directory for debug output
    debug_dir = os.path.join(TEST_TMPDIR, "test_plantcv_within_frame")
    os.mkdir(debug_dir)
    pcv.params.debug_outdir = debug_dir
    # One mask fully inside the frame and one that crosses the border
    mask_inside = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK), -1)
    mask_out_of_bounds = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MASK_OOB), -1)
    in_bounds_ib = pcv.within_frame(mask=mask_inside, border_width=1, label="prefix")
    in_bounds_oob = pcv.within_frame(mask=mask_out_of_bounds, border_width=1)
    # Inside mask -> True, out-of-bounds mask -> False
    assert in_bounds_ib is True and in_bounds_oob is False
def test_plantcv_within_frame_bad_input():
    """within_frame rejects a non-binary grayscale image."""
    # Per-test cache directory for debug output
    debug_dir = os.path.join(TEST_TMPDIR, "test_plantcv_within_frame")
    os.mkdir(debug_dir)
    pcv.params.debug_outdir = debug_dir
    # Load the color test image as grayscale (not a binary mask)
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
    # Non-mask input must raise
    with pytest.raises(RuntimeError):
        _ = pcv.within_frame(gray)
def test_plantcv_opening():
    """pcv.opening produces the expected filtered image sum."""
    # Bug fix: the cache directory was copy-pasted as "test_plantcv_closing",
    # which collides with the closing test's os.mkdir of the same path and
    # can raise FileExistsError when both tests run.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_opening")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
    gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
    bin_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Test with debug=None
    pcv.params.debug = None
    _ = pcv.opening(gray_img)
    # Test with debug='plot' and an explicit structuring element
    pcv.params.debug = 'plot'
    _ = pcv.opening(bin_img, np.ones((4, 4), np.uint8))
    # Test with debug='print'
    pcv.params.debug = 'print'
    filtered_img = pcv.opening(bin_img)
    # Pixel-sum fingerprint of the expected opened image
    assert np.sum(filtered_img) == 16184595
def test_plantcv_opening_bad_input():
    """pcv.opening rejects a multi-channel (RGB) input."""
    # Load the color test image
    color_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI), -1)
    # RGB input must raise
    with pytest.raises(RuntimeError):
        _ = pcv.opening(color_img)
def test_plantcv_output_mask():
    """output_mask writes the image and mask files to the output directory."""
    # Per-test cache directory for debug output
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_output_mask")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Load grayscale, color, and mask test images
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Debug "print" with no explicit outdir (writes into the working directory)
    pcv.params.debug = "print"
    _ = pcv.output_mask(img=gray, mask=mask, filename='test.png', outdir=None, mask_only=False)
    # Debug "plot" with and without an explicit outdir
    pcv.params.debug = "plot"
    _ = pcv.output_mask(img=gray, mask=mask, filename='test.png', outdir=out_dir, mask_only=False)
    _ = pcv.output_mask(img=color, mask=mask, filename='test.png', outdir=None, mask_only=False)
    # Remove tmp files created in the working directory
    shutil.rmtree("ori-images")
    shutil.rmtree("mask-images")
    # No debug: capture the written paths
    pcv.params.debug = None
    imgpath, maskpath, analysis_images = pcv.output_mask(img=gray, mask=mask, filename='test.png',
                                                         outdir=out_dir, mask_only=False)
    # Both output files must exist on disk
    assert all([os.path.exists(imgpath) is True, os.path.exists(maskpath) is True])
def test_plantcv_output_mask_true():
    """output_mask with mask_only=True still yields retrievable output paths."""
    # Per-test cache directory for debug output
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_output_mask")
    pcv.params.debug_outdir = out_dir
    os.mkdir(out_dir)
    # Load grayscale, color, and mask test images
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), -1)
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Debug "print" writing only the mask
    pcv.params.debug = "print"
    _ = pcv.output_mask(img=gray, mask=mask, filename='test.png', outdir=out_dir, mask_only=True)
    # Debug "plot" with a color background, mask only
    pcv.params.debug = "plot"
    _ = pcv.output_mask(img=color, mask=mask, filename='test.png', outdir=out_dir, mask_only=True)
    # No debug: capture the written paths
    pcv.params.debug = None
    imgpath, maskpath, analysis_images = pcv.output_mask(img=gray, mask=mask, filename='test.png', outdir=out_dir,
                                                         mask_only=False)
    # Both output files must exist on disk
    assert all([os.path.exists(imgpath) is True, os.path.exists(maskpath) is True])
def test_plantcv_plot_image_matplotlib_input():
    """plot_image rejects a matplotlib figure produced by pseudocolor."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_pseudocolor")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Build a pseudocolor figure from the binary test image
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    figure = pcv.visualize.pseudocolor(gray_img=gray, mask=mask, min_value=10, max_value=200)
    # Passing the figure to plot_image must raise
    with pytest.raises(RuntimeError):
        pcv.plot_image(figure)
def test_plantcv_plot_image_plotnine():
    """plot_image accepts a plotnine ggplot object without raising."""
    # Per-test cache directory
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_plot_image_plotnine")
    os.mkdir(out_dir)
    # Build a minimal ggplot figure
    frame = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 2, 3, 4]})
    figure = ggplot(data=frame)
    try:
        pcv.plot_image(img=figure)
    except RuntimeError:
        # Plotting a ggplot object should never raise
        assert False
    # Reached without error: the figure was plotted successfully
    assert True
def test_plantcv_print_image():
    """print_image writes an image file to the requested path."""
    # Per-test cache directory
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Load the color test image via pcv's own reader
    img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    target = os.path.join(out_dir, 'plantcv_print_image.png')
    pcv.print_image(img=img, filename=target)
    # The file must now exist on disk
    assert os.path.exists(target) is True
def test_plantcv_print_image_bad_type():
    """print_image raises RuntimeError for an unsupported image type (a list)."""
    with pytest.raises(RuntimeError):
        pcv.print_image(img=[], filename="/dev/null")
def test_plantcv_print_image_plotnine():
    """print_image can save a plotnine ggplot figure to a file."""
    # Per-test cache directory
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_print_image_plotnine")
    os.mkdir(out_dir)
    # Build a minimal ggplot figure
    frame = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 2, 3, 4]})
    figure = ggplot(data=frame)
    target = os.path.join(out_dir, 'plantcv_print_image.png')
    pcv.print_image(img=figure, filename=target)
    # The file must now exist on disk
    assert os.path.exists(target) is True
def test_plantcv_print_results(tmpdir):
    """print_results writes the observations JSON file."""
    # pytest-provided temporary directory
    work_dir = tmpdir.mkdir("sub")
    results_path = os.path.join(work_dir, "results.json")
    pcv.print_results(filename=results_path)
    # The results file must now exist
    assert os.path.exists(results_path)
def test_plantcv_readimage_native():
    """readimage in native mode returns the image, its path, and its name."""
    # Test with debug = None
    pcv.params.debug = None
    _ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='rgba')
    _ = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    img, path, img_name = pcv.readimage(filename=os.path.join(TEST_DATA, TEST_INPUT_COLOR), mode='native')
    # Returned name and path must match the input file
    assert img_name == TEST_INPUT_COLOR
    assert path == TEST_DATA
    # Bug fix: the original used all([i == j] for ...), which iterates over
    # always-truthy one-element lists, so the shape check could never fail.
    assert all(i == j for i, j in zip(np.shape(img), TEST_COLOR_DIM))
def test_plantcv_readimage_grayscale():
    """readimage honors both "grey" and "gray" spellings for grayscale mode."""
    pcv.params.debug = None
    source = os.path.join(TEST_DATA, TEST_INPUT_GRAY)
    # Both mode spellings are accepted
    _, _, _ = pcv.readimage(filename=source, mode="grey")
    img, path, img_name = pcv.readimage(filename=source, mode="gray")
    # A grayscale image is 2-D
    assert len(np.shape(img)) == 2
def test_plantcv_readimage_rgb():
    """readimage in rgb mode promotes a grayscale file to three channels."""
    pcv.params.debug = None
    source = os.path.join(TEST_DATA, TEST_INPUT_GRAY)
    img, path, img_name = pcv.readimage(filename=source, mode="rgb")
    # An RGB image is 3-D
    assert len(np.shape(img)) == 3
def test_plantcv_readimage_rgba_as_rgb():
    """readimage in native mode drops the alpha channel of an RGBA file."""
    pcv.params.debug = None
    source = os.path.join(TEST_DATA, TEST_INPUT_RGBA)
    img, path, img_name = pcv.readimage(filename=source, mode="native")
    # Alpha stripped: exactly three channels remain
    assert np.shape(img)[2] == 3
def test_plantcv_readimage_csv():
    """readimage in csv mode loads a thermal CSV as a 2-D array."""
    pcv.params.debug = None
    source = os.path.join(TEST_DATA, TEST_INPUT_THERMAL_CSV)
    img, path, img_name = pcv.readimage(filename=source, mode="csv")
    # CSV data comes back as a 2-D matrix
    assert len(np.shape(img)) == 2
def test_plantcv_readimage_envi():
    """readimage in "envi" mode loads hyperspectral data without raising."""
    # Test with debug = None
    pcv.params.debug = None
    # "envi" mode returns a hyperspectral data object instead of the usual
    # (img, path, name) tuple.
    array_data = pcv.readimage(filename=os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA), mode="envi")
    # NOTE(review): this version guard makes the assertion dead code on
    # Python 3, so the test always passes there -- confirm whether the guard
    # is intentional or the assert should run unconditionally.
    if sys.version_info[0] < 3:
        assert len(array_data.array_type) == 8
def test_plantcv_readimage_bad_file():
    """readimage raises RuntimeError when the file path does not exist."""
    # TEST_INPUT_COLOR is a bare filename, not joined with TEST_DATA,
    # so the path is unreadable on purpose.
    with pytest.raises(RuntimeError):
        _ = pcv.readimage(filename=TEST_INPUT_COLOR)
def test_plantcv_readbayer_default_bg():
    """readbayer (BG pattern, default alg) returns a 335x400x3 image."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_readbayer_default_bg")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Test with debug = "print"
    pcv.params.debug = "print"
    _, _, _ = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                            bayerpattern="BG", alg="default")
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="BG", alg="default")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_gb():
    """readbayer (GB pattern, default alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="GB", alg="default")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_rg():
    """readbayer (RG pattern, default alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="RG", alg="default")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_gr():
    """readbayer (GR pattern, default alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="GR", alg="default")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_bg():
    """readbayer (BG pattern, edgeaware alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="BG", alg="edgeaware")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_gb():
    """readbayer (GB pattern, edgeaware alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="GB", alg="edgeaware")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_rg():
    """readbayer (RG pattern, edgeaware alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="RG", alg="edgeaware")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_edgeaware_gr():
    """readbayer (GR pattern, edgeaware alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="GR", alg="edgeaware")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_bg():
    """readbayer (BG pattern, VNG alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="BG", alg="variablenumbergradients")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_gb():
    """readbayer (GB pattern, VNG alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="GB", alg="variablenumbergradients")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_rg():
    """readbayer (RG pattern, VNG alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="RG", alg="variablenumbergradients")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_variablenumbergradients_gr():
    """readbayer (GR pattern, VNG alg) returns a 335x400x3 image."""
    pcv.params.debug = None
    img, path, img_name = pcv.readbayer(filename=os.path.join(TEST_DATA, TEST_INPUT_BAYER),
                                        bayerpattern="GR", alg="variablenumbergradients")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(img), (335, 400, 3)))
def test_plantcv_readbayer_default_bad_input():
    """readbayer raises RuntimeError for a missing input file."""
    pcv.params.debug = None
    # Deliberately nonexistent file path
    missing = os.path.join(TEST_DATA, "no-image.png")
    with pytest.raises(RuntimeError):
        _, _, _ = pcv.readbayer(filename=missing, bayerpattern="GR", alg="default")
def test_plantcv_rectangle_mask():
    """A black rectangle_mask darkens the image (lower pixel sum)."""
    # Per-test cache directory for debug output
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rectangle_mask")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Grayscale and color test images
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    color = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Debug "print": run the white-mask variant twice
    pcv.params.debug = "print"
    _ = pcv.rectangle_mask(img=gray, p1=(0, 0), p2=(2454, 2056), color="white")
    _ = pcv.rectangle_mask(img=gray, p1=(0, 0), p2=(2454, 2056), color="white")
    # Debug "plot": gray mask on a color image
    pcv.params.debug = "plot"
    _ = pcv.rectangle_mask(img=color, p1=(0, 0), p2=(2454, 2056), color="gray")
    # No debug: black mask should reduce the total intensity
    pcv.params.debug = None
    masked, hist, contour, heir = pcv.rectangle_mask(img=gray, p1=(0, 0), p2=(2454, 2056), color="black")
    assert np.sum(masked) < np.sum(gray)
def test_plantcv_rectangle_mask_bad_input():
    """rectangle_mask rejects an unrecognized color name."""
    # Per-test cache directory for debug output
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rectangle_mask")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Grayscale test image
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    pcv.params.debug = None
    # Misspelled color ("whit") must raise
    with pytest.raises(RuntimeError):
        _ = pcv.rectangle_mask(img=gray, p1=(0, 0), p2=(2454, 2056), color="whit")
def test_plantcv_report_size_marker_detect():
    """report_size_marker_area in detect mode produces analysis images."""
    # Per-test cache directory for debug output
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_report_size_marker_detect")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Marker test image
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
    # Rectangular ROI around the size marker
    roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
    roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
    # Debug "print" with a custom label
    pcv.params.debug = "print"
    _ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
                                    objcolor='light', thresh_channel='s', thresh=120, label="prefix")
    # Debug "plot"
    pcv.params.debug = "plot"
    _ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
                                    objcolor='light', thresh_channel='s', thresh=120)
    # No debug: capture the analysis images
    pcv.params.debug = None
    images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
                                         objcolor='light', thresh_channel='s', thresh=120)
    # Reset observations so later tests start clean
    pcv.outputs.clear()
    assert len(images) != 0
def test_plantcv_report_size_marker_define():
    """report_size_marker_area in define mode produces analysis images."""
    # Marker test image
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
    # Rectangular ROI around the size marker
    roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
    roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
    pcv.params.debug = None
    images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',
                                         objcolor='light', thresh_channel='s', thresh=120)
    assert len(images) != 0
def test_plantcv_report_size_marker_grayscale_input():
    """report_size_marker_area also handles grayscale input images."""
    # Grayscale test image
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Small rectangular ROI in the image corner
    roi_contour = [np.array([[[0, 0]], [[0, 49]], [[49, 49]], [[49, 0]]], dtype=np.int32)]
    roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
    pcv.params.debug = None
    images = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='define',
                                         objcolor='light', thresh_channel='s', thresh=120)
    assert len(images) != 0
def test_plantcv_report_size_marker_bad_marker_input():
    """report_size_marker_area rejects an unknown marker mode."""
    # Marker test image
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
    # Rectangular ROI around the size marker
    roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
    roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
    # marker='none' is not a valid mode and must raise
    with pytest.raises(RuntimeError):
        _ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='none',
                                        objcolor='light', thresh_channel='s', thresh=120)
def test_plantcv_report_size_marker_bad_threshold_input():
    """report_size_marker_area requires a threshold channel in detect mode."""
    # Marker test image
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER), -1)
    # Rectangular ROI around the size marker
    roi_contour = [np.array([[[3550, 850]], [[3550, 1349]], [[4049, 1349]], [[4049, 850]]], dtype=np.int32)]
    roi_hierarchy = np.array([[[-1, -1, -1, -1]]], dtype=np.int32)
    # thresh_channel=None with marker='detect' must raise
    with pytest.raises(RuntimeError):
        _ = pcv.report_size_marker_area(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy, marker='detect',
                                        objcolor='light', thresh_channel=None, thresh=120)
def test_plantcv_rgb2gray_cmyk():
    """rgb2gray_cmyk extracts a single channel with the input's 2-D dimensions."""
    # Test with debug = None
    pcv.params.debug = None
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    c = pcv.rgb2gray_cmyk(rgb_img=img, channel="c")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(c), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_cmyk_bad_channel():
    """rgb2gray_cmyk rejects a channel name outside CMYK."""
    pcv.params.debug = None
    # Load the color test image
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Channel S is not in CMYK, so this must raise
    with pytest.raises(RuntimeError):
        _ = pcv.rgb2gray_cmyk(rgb_img=rgb, channel="s")
def test_plantcv_rgb2gray_hsv():
    """rgb2gray_hsv extracts a single channel with the input's 2-D dimensions."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray_hsv")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
    # Test with debug = None
    pcv.params.debug = None
    s = pcv.rgb2gray_hsv(rgb_img=img, channel="s")
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(s), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_hsv_bad_input():
    """rgb2gray_hsv rejects a channel name outside HSV."""
    # Load the color test image
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    pcv.params.debug = None
    # Channel "l" is not in HSV, so this must raise
    with pytest.raises(RuntimeError):
        _ = pcv.rgb2gray_hsv(rgb_img=rgb, channel="l")
def test_plantcv_rgb2gray_lab():
    """rgb2gray_lab extracts a single channel with the input's 2-D dimensions."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray_lab")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.rgb2gray_lab(rgb_img=img, channel='b')
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.rgb2gray_lab(rgb_img=img, channel='b')
    # Test with debug = None
    pcv.params.debug = None
    b = pcv.rgb2gray_lab(rgb_img=img, channel='b')
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(b), TEST_GRAY_DIM))
def test_plantcv_rgb2gray_lab_bad_input():
    """rgb2gray_lab rejects a channel name outside LAB."""
    # Load the color test image
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    pcv.params.debug = None
    # Channel "v" is not in LAB, so this must raise
    with pytest.raises(RuntimeError):
        _ = pcv.rgb2gray_lab(rgb_img=rgb, channel="v")
def test_plantcv_rgb2gray():
    """rgb2gray collapses a color image to a single channel of the same dims."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rgb2gray")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.rgb2gray(rgb_img=img)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.rgb2gray(rgb_img=img)
    # Test with debug = None
    pcv.params.debug = None
    gray = pcv.rgb2gray(rgb_img=img)
    # Bug fix: all([i == j] for ...) iterated over always-truthy one-element
    # lists, so the shape check could never fail; compare elements directly.
    assert all(i == j for i, j in zip(np.shape(gray), TEST_GRAY_DIM))
def test_plantcv_roi2mask():
    """roi.roi2mask converts a contour to a mask of the image's 2-D shape."""
    # Per-test cache directory for debug output
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_acute_vertex")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Small VIS test image plus a stored composed contour
    img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
    stored = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
    obj_contour = stored['arr_0']
    # Exercise both debug modes; keep the "print" result
    pcv.params.debug = "plot"
    _ = pcv.roi.roi2mask(img=img, contour=obj_contour)
    pcv.params.debug = "print"
    mask = pcv.roi.roi2mask(img=img, contour=obj_contour)
    # Mask matches the image footprint and contains a single 255 pixel sum
    assert np.shape(mask)[0:2] == np.shape(img)[0:2] and np.sum(mask) == 255
def test_plantcv_roi_objects():
    """roi_objects filters object contours by an ROI under several roi_type modes."""
    # Per-test cache directory for debug output
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_objects")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))

    def _load_arrays(fname):
        # Stored npz archives hold one array per key
        data = np.load(os.path.join(TEST_DATA, fname), encoding="latin1")
        return [data[key] for key in data]

    def _load_first(fname):
        # The hierarchy archives store a single array under 'arr_0'
        data = np.load(os.path.join(TEST_DATA, fname), encoding="latin1")
        return data['arr_0']

    roi_contour = _load_arrays(TEST_INPUT_ROI_CONTOUR)
    roi_hierarchy = _load_first(TEST_INPUT_ROI_HIERARCHY)
    object_contours = _load_arrays(TEST_INPUT_OBJECT_CONTOURS)
    object_hierarchy = _load_first(TEST_INPUT_OBJECT_HIERARCHY)
    # Exercise each debug mode with a different roi_type
    for debug_mode, roi_type in (("print", "largest"), ("plot", "partial"), (None, "cutto")):
        pcv.params.debug = debug_mode
        _ = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
                            object_contour=object_contours, obj_hierarchy=object_hierarchy, roi_type=roi_type)
    # Final run (debug still None) captures the filtered result
    kept_contours, kept_hierarchy, mask, area = pcv.roi_objects(img=img, roi_contour=roi_contour,
                                                                roi_hierarchy=roi_hierarchy,
                                                                object_contour=object_contours,
                                                                obj_hierarchy=object_hierarchy, roi_type="partial")
    # Expected number of contours surviving the ROI filter
    assert len(kept_contours) == 1891
def test_plantcv_roi_objects_bad_input():
    """roi_objects rejects an unrecognized roi_type value."""
    # Test image plus stored ROI/object contours and hierarchies
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    roi_file = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
    roi_contour = [roi_file[key] for key in roi_file]
    roi_hier_file = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")
    roi_hierarchy = roi_hier_file['arr_0']
    obj_file = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
    object_contours = [obj_file[key] for key in obj_file]
    obj_hier_file = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")
    object_hierarchy = obj_hier_file['arr_0']
    pcv.params.debug = None
    # roi_type="cut" is invalid (the valid value is "cutto") and must raise
    with pytest.raises(RuntimeError):
        _ = pcv.roi_objects(img=img, roi_type="cut", roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
                            object_contour=object_contours, obj_hierarchy=object_hierarchy)
def test_plantcv_roi_objects_grayscale_input():
    """roi_objects accepts a grayscale reference image and keeps 1891 contours."""
    # Debug output directory
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_objects_grayscale_input")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Load the image as grayscale plus stored contours/hierarchies
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR), 0)
    roi_cnt_data = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_CONTOUR), encoding="latin1")
    roi_cnt = [roi_cnt_data[key] for key in roi_cnt_data]
    roi_hier = np.load(os.path.join(TEST_DATA, TEST_INPUT_ROI_HIERARCHY), encoding="latin1")['arr_0']
    obj_cnt_data = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_CONTOURS), encoding="latin1")
    obj_cnt = [obj_cnt_data[key] for key in obj_cnt_data]
    obj_hier = np.load(os.path.join(TEST_DATA, TEST_INPUT_OBJECT_HIERARCHY), encoding="latin1")['arr_0']
    # Exercise the plot debug path with grayscale input
    pcv.params.debug = "plot"
    kept_cnt, _, _, _ = pcv.roi_objects(img=gray, roi_type="partial", roi_contour=roi_cnt,
                                        roi_hierarchy=roi_hier, object_contour=obj_cnt,
                                        obj_hierarchy=obj_hier)
    # Same contour count as the RGB version of this test
    assert len(kept_cnt) == 1891
def test_plantcv_rotate():
    """Rotating an image by 45 degrees changes its mean pixel value."""
    source = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    rotated = pcv.rotate(img=source, rotation_deg=45, crop=True)
    # Rotation introduces fill pixels, so the averages should differ
    assert np.average(rotated) != np.average(source)
def test_plantcv_transform_rotate():
    """transform.rotate runs under all debug modes and alters the image average."""
    # Debug output directory
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_rotate_img")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    source = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Exercise every debug mode; only the final (debug=None) result is checked
    for mode in ("print", "plot", None):
        pcv.params.debug = mode
        rotated = pcv.transform.rotate(img=source, rotation_deg=45, crop=True)
    assert np.average(rotated) != np.average(source)
def test_plantcv_transform_rotate_gray():
    """transform.rotate works on grayscale input without cropping."""
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Plot-debug run, then the checked debug=None run
    for mode in ("plot", None):
        pcv.params.debug = mode
        rotated = pcv.transform.rotate(img=gray, rotation_deg=45, crop=False)
    assert np.average(rotated) != np.average(gray)
def test_plantcv_scale_features():
    """scale_features rescales the 23 acute points under every debug mode."""
    # Debug output directory
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_scale_features")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
    obj_contour = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")['arr_0']
    # debug="print" with a numeric baseline position
    pcv.params.debug = "print"
    _ = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position=50)
    # debug="plot" with no baseline ("NA")
    pcv.params.debug = "plot"
    _ = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position='NA')
    # debug=None run supplies the checked result
    pcv.params.debug = None
    rescaled_pts, _, _ = pcv.scale_features(obj=obj_contour, mask=mask,
                                            points=TEST_ACUTE_RESULT, line_position=50)
    assert len(rescaled_pts) == 23
def test_plantcv_scale_features_bad_input():
    """Empty inputs make scale_features return ("NA", "NA") placeholder tuples."""
    mask = np.array([])
    obj_contour = np.array([])
    pcv.params.debug = None
    result = pcv.scale_features(obj=obj_contour, mask=mask, points=TEST_ACUTE_RESULT, line_position=50)
    # BUG FIX: the original `all([i == j] for ...)` always passed because each
    # one-element list [i == j] is truthy; compare element-wise for real.
    assert all(i == j for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_scharr_filter():
    """scharr_filter output keeps the input image dimensions.

    Runs under every debug mode; the debug=None result is shape-checked.
    """
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_scharr_filter")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    pcv.params.debug = "print"
    # Test with debug = "print"
    _ = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
    # Test with debug = None
    pcv.params.debug = None
    scharr_img = pcv.scharr_filter(img=img, dx=1, dy=0, scale=1)
    # BUG FIX: the original `all([i == j] for ...)` always passed because each
    # one-element list [i == j] is truthy; compare the dimensions for real.
    assert all(i == j for i, j in zip(np.shape(scharr_img), TEST_GRAY_DIM))
def test_plantcv_shift_img():
    """shift_img runs for every side and changes the image average."""
    # Debug output directory
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_shift_img")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    color_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    bin_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # One print-debug run
    pcv.params.debug = "print"
    _ = pcv.shift_img(img=color_img, number=300, side="top")
    # Remaining sides under plot debug; the binary mask covers grayscale input
    pcv.params.debug = "plot"
    for picture, side in ((color_img, "top"), (color_img, "bottom"),
                          (color_img, "right"), (bin_mask, "left")):
        _ = pcv.shift_img(img=picture, number=300, side=side)
    # Checked run with debug disabled
    pcv.params.debug = None
    shifted = pcv.shift_img(img=color_img, number=300, side="top")
    assert np.average(shifted) != np.average(color_img)
def test_plantcv_shift_img_bad_input():
    """A negative shift amount raises RuntimeError."""
    color_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    pcv.params.debug = None
    with pytest.raises(RuntimeError):
        _ = pcv.shift_img(img=color_img, number=-300, side="top")
def test_plantcv_shift_img_bad_side_input():
    """An unknown side name raises RuntimeError."""
    color_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    pcv.params.debug = None
    # "starboard" is not one of top/bottom/left/right
    with pytest.raises(RuntimeError):
        _ = pcv.shift_img(img=color_img, number=300, side="starboard")
def test_plantcv_sobel_filter():
    """sobel_filter output keeps the input image dimensions.

    Runs under every debug mode; the debug=None result is shape-checked.
    """
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_sobel_filter")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
    # Test with debug = None
    pcv.params.debug = None
    sobel_img = pcv.sobel_filter(gray_img=img, dx=1, dy=0, ksize=1)
    # BUG FIX: the original `all([i == j] for ...)` always passed because each
    # one-element list [i == j] is truthy; compare the dimensions for real.
    assert all(i == j for i, j in zip(np.shape(sobel_img), TEST_GRAY_DIM))
def test_plantcv_stdev_filter():
    """stdev_filter output has the same shape as its input image."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_sobel_filter")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    gray = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY_SMALL), -1)
    # Plot-debug run, then a print-debug run that supplies the checked result
    pcv.params.debug = "plot"
    _ = pcv.stdev_filter(img=gray, ksize=11)
    pcv.params.debug = "print"
    filtered = pcv.stdev_filter(img=gray, ksize=11)
    assert np.shape(filtered) == np.shape(gray)
def test_plantcv_watershed_segmentation():
    """watershed_segmentation counts more than nine objects in the crop."""
    # Start from a clean measurement store
    pcv.outputs.clear()
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_watershed_segmentation")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED))
    crop_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_CROPPED_MASK), -1)
    # Exercise every debug mode; the last run populates the checked output
    for mode, extra in (("print", {"label": "prefix"}), ("plot", {}), (None, {})):
        pcv.params.debug = mode
        _ = pcv.watershed_segmentation(rgb_img=rgb, mask=crop_mask, distance=10, **extra)
    assert pcv.outputs.observations['default']['estimated_object_count']['value'] > 9
def test_plantcv_white_balance_gray_16bit():
    """white_balance on a 16-bit grayscale image changes the mean value."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_gray_16bit")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    gray16 = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
    region = (5, 5, 80, 80)
    # Exercise debug printing/plotting and both correction modes
    pcv.params.debug = "print"
    _ = pcv.white_balance(img=gray16, mode='hist', roi=region)
    pcv.params.debug = "plot"
    _ = pcv.white_balance(img=gray16, mode='max', roi=region)
    # No ROI: correct using the whole image
    pcv.params.debug = None
    _ = pcv.white_balance(img=gray16, mode='hist', roi=None)
    # Checked run relies on the default mode
    corrected = pcv.white_balance(img=gray16, roi=region)
    assert np.average(corrected) != np.average(gray16)
def test_plantcv_white_balance_gray_8bit():
    """white_balance on an 8-bit grayscale image changes the mean value."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_gray_8bit")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Convert the color read to single-channel 8-bit
    gray8 = cv2.cvtColor(cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK)), cv2.COLOR_BGR2GRAY)
    region = (5, 5, 80, 80)
    # Exercise debug printing/plotting and both correction modes
    pcv.params.debug = "print"
    _ = pcv.white_balance(img=gray8, mode='hist', roi=region)
    pcv.params.debug = "plot"
    _ = pcv.white_balance(img=gray8, mode='max', roi=region)
    # No ROI: correct using the whole image
    pcv.params.debug = None
    _ = pcv.white_balance(img=gray8, mode='hist', roi=None)
    # Checked run relies on the default mode
    corrected = pcv.white_balance(img=gray8, roi=region)
    assert np.average(corrected) != np.average(gray8)
def test_plantcv_white_balance_rgb():
    """white_balance on an RGB image changes the mean value."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_white_balance_rgb")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    rgb = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER))
    region = (5, 5, 80, 80)
    # Exercise debug printing/plotting and both correction modes
    pcv.params.debug = "print"
    _ = pcv.white_balance(img=rgb, mode='hist', roi=region)
    pcv.params.debug = "plot"
    _ = pcv.white_balance(img=rgb, mode='max', roi=region)
    # No ROI: correct using the whole image
    pcv.params.debug = None
    _ = pcv.white_balance(img=rgb, mode='hist', roi=None)
    # Checked run relies on the default mode
    corrected = pcv.white_balance(img=rgb, roi=region)
    assert np.average(corrected) != np.average(rgb)
def test_plantcv_white_balance_bad_input():
    """A five-element ROI tuple is rejected with RuntimeError."""
    nir = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
    pcv.params.debug = "plot"
    # ROI must be a 4-tuple; five values should fail
    with pytest.raises(RuntimeError):
        _ = pcv.white_balance(img=nir, mode='hist', roi=(5, 5, 5, 5, 5))
def test_plantcv_white_balance_bad_mode_input():
    """An unrecognized mode string is rejected with RuntimeError."""
    marker = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MARKER))
    pcv.params.debug = "plot"
    # Only the modes used elsewhere in this file ('hist'/'max') are accepted
    with pytest.raises(RuntimeError):
        _ = pcv.white_balance(img=marker, mode='histogram', roi=(5, 5, 80, 80))
def test_plantcv_white_balance_bad_input_int():
    """A non-integer ROI coordinate is rejected with RuntimeError."""
    nir = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_NIR_MASK), -1)
    pcv.params.debug = "plot"
    # The first ROI value is a float, which should fail validation
    with pytest.raises(RuntimeError):
        _ = pcv.white_balance(img=nir, mode='hist', roi=(5., 5, 5, 5))
def test_plantcv_x_axis_pseudolandmarks():
    """x_axis_pseudolandmarks returns 20 top/bottom/center-v points each.

    Also exercises debug modes, degenerate contours, and an empty object.
    """
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_x_axis_pseudolandmarks_debug")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
    obj_contour = contours_npz['arr_0']
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img, label="prefix")
    # Degenerate inputs should not raise
    _ = pcv.x_axis_pseudolandmarks(obj=np.array([[0, 0], [0, 0]]), mask=np.array([[0, 0], [0, 0]]), img=img)
    _ = pcv.x_axis_pseudolandmarks(obj=np.array(([[89, 222]], [[252, 39]], [[89, 207]])),
                                   mask=np.array(([[42, 161]], [[2, 47]], [[211, 222]])), img=img)
    _ = pcv.x_axis_pseudolandmarks(obj=(), mask=mask, img=img)
    # Test with debug = None
    pcv.params.debug = None
    top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    pcv.outputs.clear()
    # BUG FIX: the original `all([i == j] for ...)` always passed because each
    # one-element list [i == j] is truthy; check the returned shapes for real.
    assert all(np.shape(arr) == (20, 1, 2) for arr in (top, bottom, center_v))
def test_plantcv_x_axis_pseudolandmarks_small_obj():
    """x_axis_pseudolandmarks handles a small plant and empty contours."""
    img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT), -1)
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR_SMALL_PLANT), encoding="latin1")
    obj_contour = contours_npz['arr_0']
    # Test with debug = "print"
    pcv.params.debug = "print"
    _, _, _ = pcv.x_axis_pseudolandmarks(obj=[], mask=mask, img=img)
    _, _, _ = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _, _, _ = pcv.x_axis_pseudolandmarks(obj=[], mask=mask, img=img)
    top, bottom, center_v = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    # BUG FIX: the original `all([i == j] for ...)` always passed because each
    # one-element list [i == j] is truthy; check the returned shapes for real.
    assert all(np.shape(arr) == (20, 1, 2) for arr in (top, bottom, center_v))
def test_plantcv_x_axis_pseudolandmarks_bad_input():
    """Empty inputs yield ("NA", "NA") placeholders for all three outputs."""
    img = np.array([])
    mask = np.array([])
    obj_contour = np.array([])
    pcv.params.debug = None
    result = pcv.x_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    # BUG FIX: the original `all([i == j] for ...)` always passed because each
    # one-element list [i == j] is truthy; compare element-wise for real.
    assert all(i == j for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_x_axis_pseudolandmarks_bad_obj_input():
    """Negative contour coordinates trigger a RuntimeError."""
    img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
    bad_obj = np.array([[-2, -2], [-2, -2]])
    bad_mask = np.array([[-2, -2], [-2, -2]])
    with pytest.raises(RuntimeError):
        _ = pcv.x_axis_pseudolandmarks(obj=bad_obj, mask=bad_mask, img=img)
def test_plantcv_y_axis_pseudolandmarks():
    """y_axis_pseudolandmarks returns 20 left/right/center-h points each.

    Also exercises debug modes, degenerate contours, and empty objects.
    """
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL))
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL), -1)
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR), encoding="latin1")
    obj_contour = contours_npz['arr_0']
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img, label="prefix")
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    pcv.outputs.clear()
    # Degenerate inputs should not raise
    _ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
    _ = pcv.y_axis_pseudolandmarks(obj=(), mask=mask, img=img)
    _ = pcv.y_axis_pseudolandmarks(obj=np.array(([[89, 222]], [[252, 39]], [[89, 207]])),
                                   mask=np.array(([[42, 161]], [[2, 47]], [[211, 222]])), img=img)
    _ = pcv.y_axis_pseudolandmarks(obj=np.array(([[21, 11]], [[159, 155]], [[237, 11]])),
                                   mask=np.array(([[38, 54]], [[144, 169]], [[81, 137]])), img=img)
    # Test with debug = None
    pcv.params.debug = None
    left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    pcv.outputs.clear()
    # BUG FIX: the original `all([i == j] for ...)` always passed because each
    # one-element list [i == j] is truthy; check the returned shapes for real.
    assert all(np.shape(arr) == (20, 1, 2) for arr in (left, right, center_h))
def test_plantcv_y_axis_pseudolandmarks_small_obj():
    """y_axis_pseudolandmarks handles a small plant and empty contours."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
    os.mkdir(cache_dir)
    img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_MASK_SMALL_PLANT), -1)
    contours_npz = np.load(os.path.join(TEST_DATA, TEST_VIS_COMP_CONTOUR_SMALL_PLANT), encoding="latin1")
    obj_contour = contours_npz['arr_0']
    # Test with debug = "print"
    pcv.params.debug = "print"
    _, _, _ = pcv.y_axis_pseudolandmarks(obj=[], mask=mask, img=img)
    _, _, _ = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    pcv.outputs.clear()
    left, right, center_h = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    pcv.outputs.clear()
    # BUG FIX: the original `all([i == j] for ...)` always passed because each
    # one-element list [i == j] is truthy; check the returned shapes for real.
    assert all(np.shape(arr) == (20, 1, 2) for arr in (left, right, center_h))
def test_plantcv_y_axis_pseudolandmarks_bad_input():
    """Empty inputs yield ("NA", "NA") placeholders for all three outputs."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_y_axis_pseudolandmarks_debug")
    os.mkdir(cache_dir)
    img = np.array([])
    mask = np.array([])
    obj_contour = np.array([])
    pcv.params.debug = None
    result = pcv.y_axis_pseudolandmarks(obj=obj_contour, mask=mask, img=img)
    pcv.outputs.clear()
    # BUG FIX: the original `all([i == j] for ...)` always passed because each
    # one-element list [i == j] is truthy; compare element-wise for real.
    assert all(i == j for i, j in zip(result, [("NA", "NA"), ("NA", "NA"), ("NA", "NA")]))
def test_plantcv_y_axis_pseudolandmarks_bad_obj_input():
    """Negative contour coordinates trigger a RuntimeError."""
    img = cv2.imread(os.path.join(TEST_DATA, TEST_VIS_SMALL_PLANT))
    bad_obj = np.array([[-2, -2], [-2, -2]])
    bad_mask = np.array([[-2, -2], [-2, -2]])
    with pytest.raises(RuntimeError):
        _ = pcv.y_axis_pseudolandmarks(obj=bad_obj, mask=bad_mask, img=img)
def test_plantcv_background_subtraction():
    """Distinct images yield a non-empty mask; identical ones subtract to zero."""
    fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
    bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
    big_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    pcv.params.debug = None
    checks = [
        # Differing images should produce some foreground pixels
        np.sum(pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)) > 0,
        np.sum(pcv.background_subtraction(background_image=big_img, foreground_image=bg_img)) > 0,
        # An image subtracted from itself should produce an empty mask
        np.sum(pcv.background_subtraction(background_image=fg_img, foreground_image=fg_img)) == 0,
        np.sum(pcv.background_subtraction(background_image=bg_img, foreground_image=bg_img)) == 0,
    ]
    # Every check must hold for the function to pass
    assert all(checks)
def test_plantcv_background_subtraction_debug():
    """background_subtraction yields a non-empty mask in both debug modes."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_background_subtraction_debug")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
    bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
    checks = []
    # Run once under each debug mode and record whether the mask is non-empty
    for mode in ("print", "plot"):
        pcv.params.debug = mode
        fgmask = pcv.background_subtraction(background_image=bg_img, foreground_image=fg_img)
        checks.append(np.sum(fgmask) > 0)
    assert all(checks)
def test_plantcv_background_subtraction_bad_img_type():
    """Mixing grayscale and color inputs raises RuntimeError."""
    foreground = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
    background = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND), 0)
    pcv.params.debug = None
    # The background is single-channel while the foreground is three-channel
    with pytest.raises(RuntimeError):
        _ = pcv.background_subtraction(background_image=background, foreground_image=foreground)
def test_plantcv_background_subtraction_different_sizes():
    """Background and foreground images of different sizes can be subtracted."""
    fg_img = cv2.imread(os.path.join(TEST_DATA, TEST_FOREGROUND))
    bg_img = cv2.imread(os.path.join(TEST_DATA, TEST_BACKGROUND))
    bg_shp = np.shape(bg_img)  # type: tuple
    # NOTE(review): cv2.resize takes (width, height) while np.shape gives
    # (height, width); the swap only matters here for producing a
    # differently-sized image, which is what this test needs.
    half_size = (int(bg_shp[0] / 2), int(bg_shp[1] / 2))
    shrunk_bg = cv2.resize(bg_img, half_size, interpolation=cv2.INTER_AREA)
    pcv.params.debug = None
    fgmask = pcv.background_subtraction(background_image=shrunk_bg, foreground_image=fg_img)
    assert np.sum(fgmask) > 0
def test_plantcv_spatial_clustering_dbscan():
    """DBSCAN spatial clustering finds two clusters in the multi-plant mask."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_spatial_clustering_dbscan")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    multi_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
    # Print-debug run, then a plot-debug run that supplies the checked result
    pcv.params.debug = "print"
    _ = pcv.spatial_clustering(multi_mask, algorithm="DBSCAN", min_cluster_size=10, max_distance=None)
    pcv.params.debug = "plot"
    clustered = pcv.spatial_clustering(multi_mask, algorithm="DBSCAN", min_cluster_size=10, max_distance=None)
    assert len(clustered[1]) == 2
def test_plantcv_spatial_clustering_optics():
    """OPTICS spatial clustering finds two clusters in the multi-plant mask."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_spatial_clustering_optics")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    multi_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
    pcv.params.debug = None
    clustered = pcv.spatial_clustering(multi_mask, algorithm="OPTICS", min_cluster_size=100, max_distance=5000)
    assert len(clustered[1]) == 2
def test_plantcv_spatial_clustering_badinput():
    """An unknown clustering algorithm name raises NameError."""
    multi_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_MULTI_MASK), -1)
    pcv.params.debug = None
    # "Hydra" is not a supported algorithm (the other tests use DBSCAN/OPTICS)
    with pytest.raises(NameError):
        _ = pcv.spatial_clustering(multi_mask, algorithm="Hydra", min_cluster_size=5, max_distance=100)
# ##############################
# Tests for the learn subpackage
# ##############################
def test_plantcv_learn_naive_bayes():
    """naive_bayes training writes a PDF file from paired image/mask folders."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_learn_naive_bayes")
    os.mkdir(out_dir)
    # Create image and mask subdirectories inside the cache directory
    imgdir = os.path.join(out_dir, "images")
    maskdir = os.path.join(out_dir, "masks")
    for subdir in (imgdir, maskdir):
        if not os.path.exists(subdir):
            os.mkdir(subdir)
    # Pair one image with its mask under the same filename
    shutil.copyfile(os.path.join(TEST_DATA, TEST_VIS_SMALL), os.path.join(imgdir, "image.png"))
    shutil.copyfile(os.path.join(TEST_DATA, TEST_MASK_SMALL), os.path.join(maskdir, "image.png"))
    outfile = os.path.join(out_dir, "naive_bayes_pdfs.txt")
    plantcv.learn.naive_bayes(imgdir=imgdir, maskdir=maskdir, outfile=outfile, mkplots=True)
    assert os.path.exists(outfile)
def test_plantcv_learn_naive_bayes_multiclass():
    """naive_bayes_multiclass training writes its PDF output file."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_learn_naive_bayes_multiclass")
    os.mkdir(out_dir)
    outfile = os.path.join(out_dir, "naive_bayes_multiclass_pdfs.txt")
    samples = os.path.join(TEST_DATA, TEST_SAMPLED_RGB_POINTS)
    plantcv.learn.naive_bayes_multiclass(samples_file=samples, outfile=outfile, mkplots=True)
    assert os.path.exists(outfile)
# ####################################
# Tests for the morphology subpackage
# ####################################
def test_plantcv_morphology_segment_curvature():
    """segment_curvature records 22 curvature values for the pruned skeleton."""
    pcv.outputs.clear()
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_curvature")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    pruned_skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
    pcv.params.debug = "print"
    seg_img, segments = pcv.morphology.segment_skeleton(skel_img=pruned_skel)
    # Reset outputs between runs so only the last call's values are measured
    pcv.outputs.clear()
    _ = pcv.morphology.segment_curvature(seg_img, segments, label="prefix")
    pcv.params.debug = "plot"
    pcv.outputs.clear()
    _ = pcv.morphology.segment_curvature(seg_img, segments)
    assert len(pcv.outputs.observations['default']['segment_curvature']['value']) == 22
def test_plantcv_morphology_check_cycles():
    """check_cycles reports one cycle in the binary test mask."""
    pcv.outputs.clear()
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_branches")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    # Exercise every debug mode; the last run populates the checked output
    for mode, extra in (("print", {"label": "prefix"}), ("plot", {}), (None, {})):
        pcv.params.debug = mode
        _ = pcv.morphology.check_cycles(mask, **extra)
    assert pcv.outputs.observations['default']['num_cycles']['value'] == 1
def test_plantcv_morphology_find_branch_pts():
    """find_branch_pts locates branch points summing to 9435."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_branches")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    # Print debug with the overlay mask, plot debug without it
    pcv.params.debug = "print"
    _ = pcv.morphology.find_branch_pts(skel_img=skel, mask=mask, label="prefix")
    pcv.params.debug = "plot"
    _ = pcv.morphology.find_branch_pts(skel_img=skel)
    pcv.params.debug = None
    branch_img = pcv.morphology.find_branch_pts(skel_img=skel)
    assert np.sum(branch_img) == 9435
def test_plantcv_morphology_find_tips():
    """find_tips locates tip points summing to 9435."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_tips")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    # Print debug with the overlay mask, plot debug without it
    pcv.params.debug = "print"
    _ = pcv.morphology.find_tips(skel_img=skel, mask=mask, label="prefix")
    pcv.params.debug = "plot"
    _ = pcv.morphology.find_tips(skel_img=skel)
    pcv.params.debug = None
    tip_img = pcv.morphology.find_tips(skel_img=skel)
    assert np.sum(tip_img) == 9435
def test_plantcv_morphology_prune():
    """Pruning with size > 0 removes skeleton pixels."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    # Debug runs: print without a mask, plot with one
    pcv.params.debug = "print"
    _ = pcv.morphology.prune(skel_img=skel, size=1)
    pcv.params.debug = "plot"
    _ = pcv.morphology.prune(skel_img=skel, size=1, mask=skel)
    pcv.params.debug = None
    pruned, _, _ = pcv.morphology.prune(skel_img=skel, size=3)
    assert np.sum(pruned) < np.sum(skel)
def test_plantcv_morphology_prune_size0():
    """Pruning with size=0 leaves the skeleton unchanged."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    unchanged, _, _ = pcv.morphology.prune(skel_img=skel, size=0)
    assert np.sum(unchanged) == np.sum(skel)
def test_plantcv_morphology_iterative_prune():
    """_iterative_prune removes pixels from the skeleton."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_pruned")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    pruned = pcv.morphology._iterative_prune(skel_img=skel, size=3)
    assert np.sum(pruned) < np.sum(skel)
def test_plantcv_morphology_segment_skeleton():
    """segment_skeleton splits the skeleton into 73 segments."""
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_skeleton")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    # Print debug with the overlay mask, plot debug without it
    pcv.params.debug = "print"
    _ = pcv.morphology.segment_skeleton(skel_img=skel, mask=mask)
    pcv.params.debug = "plot"
    _, segments = pcv.morphology.segment_skeleton(skel_img=skel)
    assert len(segments) == 73
def test_plantcv_morphology_fill_segments():
    """fill_segments reports the expected per-segment areas."""
    pcv.outputs.clear()
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_fill_segments")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    stored = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS))
    segments = [stored[key] for key in stored]
    # Run under both debug modes; the outputs accumulate for the assertion
    pcv.params.debug = "print"
    _ = pcv.morphology.fill_segments(mask, segments, label="prefix")
    pcv.params.debug = "plot"
    _ = pcv.morphology.fill_segments(mask, segments)
    areas = pcv.outputs.observations['default']['segment_area']['value']
    assert areas[42] == 5529 and areas[20] == 5057 and areas[49] == 3323
def test_plantcv_morphology_fill_segments_with_stem():
    """fill_segments with a stem object reports 70 leaf areas."""
    pcv.outputs.clear()
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_fill_segments")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    stored = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS))
    segments = [stored[key] for key in stored]
    # Treat the first four segments as the stem
    stem_segments = segments[0:4]
    pcv.params.debug = "print"
    _ = pcv.morphology.fill_segments(mask, segments, stem_segments)
    leaf_areas = pcv.outputs.observations['default']['leaf_area']['value']
    assert len(leaf_areas) == 70
def test_plantcv_morphology_segment_angle():
    """segment_angle records 22 angles for the pruned skeleton."""
    pcv.outputs.clear()
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_angles")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    pruned_skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
    pcv.params.debug = "print"
    seg_img, segments = pcv.morphology.segment_skeleton(skel_img=pruned_skel)
    _ = pcv.morphology.segment_angle(segmented_img=seg_img, objects=segments, label="prefix")
    pcv.params.debug = "plot"
    _ = pcv.morphology.segment_angle(seg_img, segments)
    assert len(pcv.outputs.observations['default']['segment_angle']['value']) == 22
def test_plantcv_morphology_segment_angle_overflow():
    """segment_angle handles an unpruned skeleton (historic overflow case)."""
    pcv.outputs.clear()
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_angles")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    # Unpruned skeleton: used to cause an overflow error before segment_angle
    # gained an extra guard
    skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    seg_img, segments = pcv.morphology.segment_skeleton(skel_img=skel)
    _ = pcv.morphology.segment_angle(seg_img, segments)
    assert len(pcv.outputs.observations['default']['segment_angle']['value']) == 73
def test_plantcv_morphology_segment_euclidean_length():
    """segment_euclidean_length records 22 lengths for the pruned skeleton."""
    pcv.outputs.clear()
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_eu_length")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    pruned_skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
    pcv.params.debug = "print"
    seg_img, segments = pcv.morphology.segment_skeleton(skel_img=pruned_skel)
    _ = pcv.morphology.segment_euclidean_length(seg_img, segments, label="prefix")
    pcv.params.debug = "plot"
    _ = pcv.morphology.segment_euclidean_length(seg_img, segments)
    assert len(pcv.outputs.observations['default']['segment_eu_length']['value']) == 22
def test_plantcv_morphology_segment_euclidean_length_bad_input():
    """An unpruned, freshly skeletonized mask raises RuntimeError."""
    mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    fresh_skel = pcv.morphology.skeletonize(mask=mask)
    pcv.params.debug = None
    seg_img, segments = pcv.morphology.segment_skeleton(skel_img=fresh_skel)
    with pytest.raises(RuntimeError):
        _ = pcv.morphology.segment_euclidean_length(seg_img, segments)
def test_plantcv_morphology_segment_path_length():
    """segment_path_length records 22 path lengths for the pruned skeleton."""
    pcv.outputs.clear()
    out_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_path_length")
    os.mkdir(out_dir)
    pcv.params.debug_outdir = out_dir
    pruned_skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
    pcv.params.debug = "print"
    seg_img, segments = pcv.morphology.segment_skeleton(skel_img=pruned_skel)
    _ = pcv.morphology.segment_path_length(seg_img, segments, label="prefix")
    pcv.params.debug = "plot"
    _ = pcv.morphology.segment_path_length(seg_img, segments)
    assert len(pcv.outputs.observations['default']['segment_path_length']['value']) == 22
def test_plantcv_morphology_skeletonize():
    """Skeletonize a binary mask and compare with the stored reference."""
    # Debug artifacts go into a test-specific cache directory.
    debug_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_skeletonize")
    os.mkdir(debug_dir)
    pcv.params.debug_outdir = debug_dir
    binary_mask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_BINARY), -1)
    expected_skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    # Exercise both debug modes first, then recompute quietly for comparison.
    pcv.params.debug = "print"
    _ = pcv.morphology.skeletonize(mask=binary_mask)
    pcv.params.debug = "plot"
    _ = pcv.morphology.skeletonize(mask=binary_mask)
    pcv.params.debug = None
    result = pcv.morphology.skeletonize(mask=binary_mask)
    match = np.array(result == expected_skel)
    assert match.all()
def test_plantcv_morphology_segment_sort():
    """Sort skeleton segments into leaf and stem groups."""
    # Debug artifacts go into a test-specific cache directory.
    debug_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_sort")
    os.mkdir(debug_dir)
    pcv.params.debug_outdir = debug_dir
    skel_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    seg_img, seg_objs = pcv.morphology.segment_skeleton(skel_img=skel_img)
    # Exercise both debug modes; the plot run supplies the checked result.
    pcv.params.debug = "print"
    _ = pcv.morphology.segment_sort(skel_img, seg_objs, mask=skel_img)
    pcv.params.debug = "plot"
    leaf_objs, stem_objs = pcv.morphology.segment_sort(skel_img, seg_objs)
    assert len(leaf_objs) == 36
def test_plantcv_morphology_segment_tangent_angle():
    """Record tangent angles for pre-saved skeleton segment contours."""
    # Clear previous outputs
    pcv.outputs.clear()
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_tangent_angle")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
    # NOTE(review): object-dtype .npz archives need allow_pickle=True on
    # NumPy >= 1.16.3 — confirm against the pinned NumPy version.
    objects = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS), encoding="latin1")
    objs = [objects[arr_n] for arr_n in objects]
    pcv.params.debug = "print"
    _ = pcv.morphology.segment_tangent_angle(skel, objs, 2, label="prefix")
    pcv.params.debug = "plot"
    _ = pcv.morphology.segment_tangent_angle(skel, objs, 2)
    # Only the second (default-label) call feeds the 'default' bucket.
    assert len(pcv.outputs.observations['default']['segment_tangent_angle']['value']) == 73
def test_plantcv_morphology_segment_id():
    """Label skeleton segments with ID annotations on a debug image."""
    # Test cache directory.  Use a name unique to this test: the original
    # copy-pasted "..._segment_tangent_angle", which collides with the
    # directory created by test_plantcv_morphology_segment_tangent_angle
    # and makes os.mkdir() raise FileExistsError.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_id")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
    # NOTE(review): object-dtype .npz archives need allow_pickle=True on
    # NumPy >= 1.16.3 — confirm against the pinned NumPy version.
    objects = np.load(os.path.join(TEST_DATA, TEST_SKELETON_OBJECTS), encoding="latin1")
    objs = [objects[arr_n] for arr_n in objects]
    pcv.params.debug = "print"
    _ = pcv.morphology.segment_id(skel, objs)
    pcv.params.debug = "plot"
    _, labeled_img = pcv.morphology.segment_id(skel, objs, mask=skel)
    # The ID annotations add non-zero pixels on top of the skeleton.
    assert np.sum(labeled_img) > np.sum(skel)
def test_plantcv_morphology_segment_insertion_angle():
    """Measure leaf insertion angles relative to the sorted stem segments."""
    # Clear previous outputs
    pcv.outputs.clear()
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=6)
    segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
    leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
    pcv.params.debug = "plot"
    _ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 3, label="prefix")
    pcv.params.debug = "print"
    # The default-label call (size=10) feeds the asserted observations;
    # some entries are 'NA' — presumably leaves too short for that fit
    # size, verify against the segment_insertion_angle implementation.
    _ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 10)
    assert pcv.outputs.observations['default']['segment_insertion_angle']['value'][:6] == ['NA', 'NA', 'NA',
                                                                                          24.956918822001636,
                                                                                          50.7313343343401,
                                                                                          56.427712102130734]
def test_plantcv_morphology_segment_insertion_angle_bad_stem():
    """A bogus stem built from leaf segments should raise RuntimeError."""
    # Test cache directory.  Renamed from the copy-pasted
    # "..._segment_insertion_angle", which collides with the directory
    # already created by test_plantcv_morphology_segment_insertion_angle
    # and makes os.mkdir() raise FileExistsError.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_insertion_angle_bad_stem")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=5)
    segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
    leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
    # Replace the real stem with two leaf segments to force the error path.
    stem_obj = [leaf_obj[0], leaf_obj[10]]
    with pytest.raises(RuntimeError):
        _ = pcv.morphology.segment_insertion_angle(pruned, segmented_img, leaf_obj, stem_obj, 10)
def test_plantcv_morphology_segment_combine():
    """Combining two segments by ID reduces the object count by one."""
    skel_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
    seg_img, seg_objs = pcv.morphology.segment_skeleton(skel_img=skel_img)
    pcv.params.debug = "plot"
    # Test with list of IDs input: a flat list merges those segments.
    _, merged_objs = pcv.morphology.segment_combine([0, 1], seg_objs, skel_img)
    assert len(merged_objs) + 1 == len(seg_objs)
def test_plantcv_morphology_segment_combine_lists():
    """Combining [[0, 1, 2], [3, 4]] merges five segments into two."""
    # Test cache directory.  Renamed from the copy-pasted
    # "..._segment_insertion_angle", which collides with the directory
    # already created by test_plantcv_morphology_segment_insertion_angle
    # and makes os.mkdir() raise FileExistsError.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_segment_combine_lists")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    skel = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
    segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=skel)
    pcv.params.debug = "print"
    # Test with list of lists input
    _, new_objects = pcv.morphology.segment_combine([[0, 1, 2], [3, 4]], seg_objects, skel)
    assert len(new_objects) + 3 == len(seg_objects)
def test_plantcv_morphology_segment_combine_bad_input():
    """Non-integer segment IDs should make segment_combine raise."""
    skel_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON_PRUNED), -1)
    seg_img, seg_objs = pcv.morphology.segment_skeleton(skel_img=skel_img)
    pcv.params.debug = "plot"
    with pytest.raises(RuntimeError):
        _, new_objects = pcv.morphology.segment_combine([0.5, 1.5], seg_objs, skel_img)
def test_plantcv_morphology_analyze_stem():
    """Run stem analysis on the sorted stem segments and check the angle."""
    # Clear previous outputs
    pcv.outputs.clear()
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_analyze_stem")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    pruned, segmented_img, _ = pcv.morphology.prune(skel_img=skeleton, size=6)
    segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
    leaf_obj, stem_obj = pcv.morphology.segment_sort(pruned, seg_objects)
    pcv.params.debug = "plot"
    _ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj, label="prefix")
    pcv.params.debug = "print"
    # The default-label run supplies the asserted stem_angle observation.
    _ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj)
    assert pcv.outputs.observations['default']['stem_angle']['value'] == -12.531776428222656
def test_plantcv_morphology_analyze_stem_bad_angle():
    """A degenerate, perfectly vertical stem yields the sentinel angle."""
    # Clear previous outputs
    pcv.outputs.clear()
    # Test cache directory.  Renamed from the copy-pasted
    # "..._segment_insertion_angle", which collides with the directory
    # already created by test_plantcv_morphology_segment_insertion_angle
    # and makes os.mkdir() raise FileExistsError.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_morphology_analyze_stem_bad_angle")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    skeleton = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_SKELETON), -1)
    pruned, _, _ = pcv.morphology.prune(skel_img=skeleton, size=5)
    segmented_img, seg_objects = pcv.morphology.segment_skeleton(skel_img=pruned)
    _, _ = pcv.morphology.segment_sort(pruned, seg_objects)
    # Hand-built vertical stem contour (constant x) to force the bad-angle
    # path; the commented-out stem_obj experiments were removed.
    stem_obj = [[[[1116, 1728]], [[1116, 1]]]]
    _ = pcv.morphology.analyze_stem(rgb_img=segmented_img, stem_objects=stem_obj)
    assert pcv.outputs.observations['default']['stem_angle']['value'] == 22877334.0
# ########################################
# Tests for the hyperspectral subpackage
# ########################################
def test_plantcv_hyperspectral_read_data_default():
    """Read the sample hyperspectral cube and check its array shape."""
    debug_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_read_data_default")
    os.mkdir(debug_dir)
    pcv.params.debug_outdir = debug_dir
    hsi_path = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    # Exercise both debug modes; the second read supplies the result.
    pcv.params.debug = "plot"
    _ = pcv.hyperspectral.read_data(filename=hsi_path)
    pcv.params.debug = "print"
    cube = pcv.hyperspectral.read_data(filename=hsi_path)
    assert np.shape(cube.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_no_default_bands():
    """Cubes without default-band metadata still read with full shape."""
    pcv.params.debug = "plot"
    hsi_path = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_NO_DEFAULT)
    cube = pcv.hyperspectral.read_data(filename=hsi_path)
    assert np.shape(cube.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_read_data_approx_pseudorgb():
    """Cubes needing an approximated pseudo-RGB read with full shape."""
    pcv.params.debug = "plot"
    hsi_path = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA_APPROX_PSEUDO)
    cube = pcv.hyperspectral.read_data(filename=hsi_path)
    assert np.shape(cube.array_data) == (1, 1600, 978)
def test_plantcv_spectral_index_ndvi():
    """NDVI index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ndvi")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.ndvi(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ndvi_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.ndvi(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.ndvi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_gdvi():
    """GDVI index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_gdvi")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.gdvi(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_gdvi_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.gdvi(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.gdvi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_savi():
    """SAVI index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_savi")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.savi(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_savi_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.savi(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.savi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pri():
    """PRI index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pri")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.pri(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pri_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.pri(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.pri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ari():
    """ARI index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ari")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.ari(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ari_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.ari(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.ari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ci_rededge():
    """CI red-edge index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ci_rededge")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.ci_rededge(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ci_rededge_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.ci_rededge(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.ci_rededge(hsi=index_array, distance=20)
def test_plantcv_spectral_index_cri550():
    """CRI550 index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_cri550")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.cri550(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_cri550_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.cri550(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.cri550(hsi=index_array, distance=20)
def test_plantcv_spectral_index_cri700():
    """CRI700 index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_cri700")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.cri700(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_cri700_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.cri700(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.cri700(hsi=index_array, distance=20)
def test_plantcv_spectral_index_egi():
    """EGI works on a plain RGB image rather than an HSI cube."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_egi")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    index_array = pcv.spectral_index.egi(rgb_img=rgb_img)
    assert np.shape(index_array.array_data) == (2056, 2454) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_evi():
    """EVI index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_evi")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.evi(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_evi_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.evi(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.evi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mari():
    """MARI index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mari")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.mari(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mari_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.mari(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.mari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mcari():
    """MCARI index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mcari")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.mcari(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mcari_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.mcari(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.mcari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_mtci():
    """MTCI index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_mtci")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.mtci(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_mtci_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.mtci(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.mtci(hsi=index_array, distance=20)
def test_plantcv_spectral_index_ndre():
    """NDRE index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_ndre")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.ndre(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_ndre_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.ndre(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.ndre(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_chla():
    """PSND chl-a index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_chla")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.psnd_chla(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_chla_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.psnd_chla(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.psnd_chla(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_chlb():
    """PSND chl-b index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_chlb")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.psnd_chlb(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_chlb_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.psnd_chlb(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.psnd_chlb(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psnd_car():
    """PSND carotenoid index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psnd_car")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.psnd_car(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psnd_car_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.psnd_car(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.psnd_car(hsi=index_array, distance=20)
def test_plantcv_spectral_index_psri():
    """PSRI index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_psri")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.psri(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_psri_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.psri(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.psri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_chla():
    """PSSR chl-a index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_chla")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.pssr_chla(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_chla_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.pssr_chla(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.pssr_chla(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_chlb():
    """PSSR chl-b index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_chlb")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.pssr_chlb(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_chlb_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.pssr_chlb(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.pssr_chlb(hsi=index_array, distance=20)
def test_plantcv_spectral_index_pssr_car():
    """PSSR carotenoid index keeps per-pixel shape; pseudo-RGB scales to 255."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_pssr_car")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.pssr_car(hsi=array_data, distance=20)
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_pssr_car_bad_input():
    """Feeding an index result back in as the HSI raises RuntimeError."""
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    pcv.params.debug = None
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.pssr_car(hsi=array_data, distance=20)
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.pssr_car(hsi=index_array, distance=20)
def test_plantcv_spectral_index_rgri():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_rgri")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rgri(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_rgri_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rgri(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.rgri(hsi=index_array, distance=20)
def test_plantcv_spectral_index_rvsi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_rvsi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rvsi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_rvsi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.rvsi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.rvsi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_sipi():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_sipi")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sipi(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_sipi_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sipi(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.sipi(hsi=index_array, distance=20)
def test_plantcv_spectral_index_sr():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_sr")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sr(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_sr_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.sr(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.sr(hsi=index_array, distance=20)
def test_plantcv_spectral_index_vari():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_vari")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vari(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_vari_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vari(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.vari(hsi=index_array, distance=20)
def test_plantcv_spectral_index_vi_green():
cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_vi_green")
os.mkdir(cache_dir)
pcv.params.debug_outdir = cache_dir
pcv.params.debug = None
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vi_green(hsi=array_data, distance=20)
assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_vi_green_bad_input():
spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
pcv.params.debug = None
array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
index_array = pcv.spectral_index.vi_green(hsi=array_data, distance=20)
with pytest.raises(RuntimeError):
_ = pcv.spectral_index.vi_green(hsi=index_array, distance=20)
def test_plantcv_spectral_index_wi():
    """WI (water) index of the sample cube matches the image shape with a 0-255 pseudo-RGB."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_index_wi")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.wi(hsi=array_data, distance=20)
    # Index image keeps the spatial shape (1, 1600); pseudo-RGB spans full 8-bit range
    assert np.shape(index_array.array_data) == (1, 1600) and np.nanmax(index_array.pseudo_rgb) == 255
def test_plantcv_spectral_index_wi_bad_input():
    """Feeding an already-computed index back into wi() must raise RuntimeError."""
    pcv.params.debug = None
    data_path = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    hsi_cube = pcv.hyperspectral.read_data(filename=data_path)
    first_pass = pcv.spectral_index.wi(hsi=hsi_cube, distance=20)
    # An index array is not a hyperspectral cube, so a second pass is invalid input
    with pytest.raises(RuntimeError):
        _ = pcv.spectral_index.wi(hsi=first_pass, distance=20)
def test_plantcv_hyperspectral_analyze_spectral():
    """analyze_spectral records one frequency value per band (978) under the given label."""
    # Clear previous outputs
    pcv.outputs.clear()
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_spectral")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    mask = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    pcv.params.debug = None
    _ = pcv.hyperspectral.analyze_spectral(array=array_data, mask=mask, histplot=True, label="prefix")
    # One observation per wavelength band of the test cube
    assert len(pcv.outputs.observations['prefix']['spectral_frequencies']['value']) == 978
def test_plantcv_hyperspectral_analyze_index():
    """analyze_index on a SAVI index with a full (all-white) mask yields a positive mean."""
    # Clear previous outputs
    pcv.outputs.clear()
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
    # White (255) mask covering the whole index image
    mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
    pcv.params.debug = None
    pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True)
    assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_set_range():
    """analyze_index accepts explicit histogram bounds (min_bin/max_bin)."""
    # Clear previous outputs
    pcv.outputs.clear()
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_set_range")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
    # White (255) mask covering the whole index image
    mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
    pcv.params.debug = None
    pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, histplot=True, min_bin=0, max_bin=1)
    assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_auto_range():
    """analyze_index accepts automatic histogram bounds (min_bin/max_bin='auto')."""
    # Clear previous outputs
    pcv.outputs.clear()
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_auto_range")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
    # White (255) mask covering the whole index image
    mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
    pcv.params.debug = None
    pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, min_bin="auto", max_bin="auto")
    assert pcv.outputs.observations['default']['mean_index_savi']['value'] > 0
def test_plantcv_hyperspectral_analyze_index_outside_range_warning():
    """A histogram range that clips observed index values must print a WARNING."""
    import io
    from contextlib import redirect_stdout
    # NOTE: this cache dir name is shared with the *_auto_range test above, which
    # already created it; makedirs(..., exist_ok=True) avoids a FileExistsError
    # that os.mkdir would raise when both tests run in the same session.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_analyze_index_auto_range")
    os.makedirs(cache_dir, exist_ok=True)
    pcv.params.debug_outdir = cache_dir
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
    # White (255) mask covering the whole index image
    mask_img = np.ones(np.shape(index_array.array_data), dtype=np.uint8) * 255
    f = io.StringIO()
    with redirect_stdout(f):
        pcv.params.debug = None
        # Deliberately narrow bin range so values fall outside it
        pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img, min_bin=.5, max_bin=.55, label="i")
    out = f.getvalue()
    assert out[0:10] == 'WARNING!!!'
def test_plantcv_hyperspectral_analyze_index_bad_input_mask():
    """A multi-channel mask image must make analyze_index raise RuntimeError."""
    pcv.params.debug = None
    # Read the mask without the -1 flag so it is loaded as a color image
    bad_mask = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK))
    cube = pcv.hyperspectral.read_data(
        filename=os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA))
    savi_index = pcv.spectral_index.savi(hsi=cube, distance=801)
    with pytest.raises(RuntimeError):
        pcv.hyperspectral.analyze_index(index_array=savi_index, mask=bad_mask)
def test_plantcv_hyperspectral_analyze_index_bad_input_index():
    """Replacing the index data with a color image must make analyze_index raise."""
    pcv.params.debug = None
    spectral_filename = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    array_data = pcv.hyperspectral.read_data(filename=spectral_filename)
    index_array = pcv.spectral_index.savi(hsi=array_data, distance=801)
    mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
    # Overwrite the index data with the mask loaded as a color image (no -1 flag)
    index_array.array_data = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK))
    with pytest.raises(RuntimeError):
        pcv.hyperspectral.analyze_index(index_array=index_array, mask=mask_img)
def test_plantcv_hyperspectral_analyze_index_bad_input_datatype():
    """Passing a raw hyperspectral cube instead of an index must raise RuntimeError."""
    pcv.params.debug = None
    mask = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
    cube_path = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    cube = pcv.hyperspectral.read_data(filename=cube_path)
    # The raw cube is not an index image, so analyze_index must reject it
    with pytest.raises(RuntimeError):
        pcv.hyperspectral.analyze_index(index_array=cube, mask=mask)
def test_plantcv_hyperspectral_calibrate():
    """calibrate() with white/dark references keeps the full cube shape (1, 1600, 978)."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_calibrate")
    os.mkdir(cache_dir)
    raw = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    white = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_WHITE)
    dark = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DARK)
    raw = pcv.hyperspectral.read_data(filename=raw)
    white = pcv.hyperspectral.read_data(filename=white)
    dark = pcv.hyperspectral.read_data(filename=dark)
    # Exercise both debug modes
    pcv.params.debug = "plot"
    _ = pcv.hyperspectral.calibrate(raw_data=raw, white_reference=white, dark_reference=dark)
    pcv.params.debug = "print"
    calibrated = pcv.hyperspectral.calibrate(raw_data=raw, white_reference=white, dark_reference=dark)
    assert np.shape(calibrated.array_data) == (1, 1600, 978)
def test_plantcv_hyperspectral_extract_wavelength():
    """extract_wavelength pulls a single band as a 2-D (1, 1600) image."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_hyperspectral_extract_wavelength")
    os.mkdir(cache_dir)
    spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    spectral = pcv.hyperspectral.read_data(filename=spectral)
    # Exercise both debug modes
    pcv.params.debug = "plot"
    _ = pcv.hyperspectral.extract_wavelength(spectral_data=spectral, wavelength=500)
    pcv.params.debug = "print"
    new = pcv.hyperspectral.extract_wavelength(spectral_data=spectral, wavelength=500)
    assert np.shape(new.array_data) == (1, 1600)
def test_plantcv_hyperspectral_avg_reflectance():
    """_avg_reflectance (private helper) returns one mean value per band (978)."""
    spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    mask_img = cv2.imread(os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_MASK), -1)
    spectral = pcv.hyperspectral.read_data(filename=spectral)
    avg_reflect = pcv.hyperspectral._avg_reflectance(spectral, mask=mask_img)
    assert len(avg_reflect) == 978
def test_plantcv_hyperspectral_inverse_covariance():
    """_inverse_covariance (private helper) returns a bands-by-bands (978, 978) matrix."""
    spectral = os.path.join(HYPERSPECTRAL_TEST_DATA, HYPERSPECTRAL_DATA)
    spectral = pcv.hyperspectral.read_data(filename=spectral)
    inv_cov = pcv.hyperspectral._inverse_covariance(spectral)
    assert np.shape(inv_cov) == (978, 978)
# ########################################
# Tests for the photosynthesis subpackage
# ########################################
def test_plantcv_photosynthesis_read_dat():
    """read_cropreporter returns (fdark, fmin, fmax) frames with sum(fmin) < sum(fmax)."""
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_photosynthesis_read_dat")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Exercise both debug modes
    pcv.params.debug = "plot"
    fluor_filename = os.path.join(FLUOR_TEST_DATA, FLUOR_IMG)
    _, _, _ = pcv.photosynthesis.read_cropreporter(filename=fluor_filename)
    pcv.params.debug = "print"
    fdark, fmin, fmax = pcv.photosynthesis.read_cropreporter(filename=fluor_filename)
    assert np.sum(fmin) < np.sum(fmax)
def test_plantcv_photosynthesis_analyze_fvfm():
    """analyze_fvfm runs under both debug modes and returns a non-empty image list."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
    os.mkdir(cache_dir)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
    fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
    fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
    fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    _ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000, label="prefix")
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    fvfm_images = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
    assert len(fvfm_images) != 0
def test_plantcv_photosynthesis_analyze_fvfm_print_analysis_results():
    """print_results writes the accumulated Fv/Fm observations to a results file."""
    # Test cache directory. NOTE: the name "test_plantcv_analyze_fvfm" is shared
    # with other fvfm tests; makedirs(..., exist_ok=True) avoids the
    # FileExistsError that os.mkdir raises when the directory already exists.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
    os.makedirs(cache_dir, exist_ok=True)
    pcv.params.debug_outdir = cache_dir
    fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
    fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
    fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
    fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
    _ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
    result_file = os.path.join(cache_dir, "results.txt")
    pcv.print_results(result_file)
    pcv.outputs.clear()
    assert os.path.exists(result_file)
def test_plantcv_photosynthesis_analyze_fvfm_bad_fdark():
    """An over-bright fdark frame must fail the fdark QC check."""
    # Clear previous outputs
    pcv.outputs.clear()
    # NOTE: the cache dir name is shared with other fvfm tests;
    # makedirs(..., exist_ok=True) avoids the FileExistsError that
    # os.mkdir raises when the directory was created by an earlier test.
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_analyze_fvfm")
    os.makedirs(cache_dir, exist_ok=True)
    pcv.params.debug_outdir = cache_dir
    # Read in test data
    fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FDARK), -1)
    fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
    fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
    fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
    # Inflate fdark so it exceeds the QC brightness threshold
    _ = pcv.photosynthesis.analyze_fvfm(fdark=fdark + 3000, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
    check = pcv.outputs.observations['default']['fdark_passed_qc']['value'] is False
    assert check
def test_plantcv_photosynthesis_analyze_fvfm_bad_input():
    """A color image (loaded without -1) as fdark must make analyze_fvfm raise."""
    fdark = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    fmin = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMIN), -1)
    fmax = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMAX), -1)
    fmask = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_FMASK), -1)
    with pytest.raises(RuntimeError):
        _ = pcv.photosynthesis.analyze_fvfm(fdark=fdark, fmin=fmin, fmax=fmax, mask=fmask, bins=1000)
# ##############################
# Tests for the roi subpackage
# ##############################
def test_plantcv_roi_from_binary_image():
    """roi.from_binary_image turns a filled rectangle mask into a single ROI contour."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_from_binary_image")
    os.mkdir(cache_dir)
    # Read in test RGB image
    rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Create a binary image with a filled 900x900 rectangle
    bin_img = np.zeros(np.shape(rgb_img)[0:2], dtype=np.uint8)
    cv2.rectangle(bin_img, (100, 100), (1000, 1000), 255, -1)
    # Test with debug = "print"
    pcv.params.debug = "print"
    pcv.params.debug_outdir = cache_dir
    _, _ = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _, _ = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
    # Test with debug = None
    pcv.params.debug = None
    roi_contour, roi_hierarchy = pcv.roi.from_binary_image(bin_img=bin_img, img=rgb_img)
    # Assert the contours and hierarchy lists contain only the ROI
    assert np.shape(roi_contour) == (1, 3600, 1, 2)
def test_plantcv_roi_from_binary_image_grayscale_input():
    """roi.from_binary_image also accepts a grayscale background image."""
    # Read in a test grayscale image
    gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Create a binary image with a filled 900x900 rectangle
    bin_img = np.zeros(np.shape(gray_img)[0:2], dtype=np.uint8)
    cv2.rectangle(bin_img, (100, 100), (1000, 1000), 255, -1)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    roi_contour, roi_hierarchy = pcv.roi.from_binary_image(bin_img=bin_img, img=gray_img)
    # Assert the contours and hierarchy lists contain only the ROI
    assert np.shape(roi_contour) == (1, 3600, 1, 2)
def test_plantcv_roi_from_binary_image_bad_binary_input():
    """A color image is not a valid binary mask for roi.from_binary_image."""
    color_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Binary input is required but an RGB input is provided
    with pytest.raises(RuntimeError):
        _, _ = pcv.roi.from_binary_image(bin_img=color_img, img=color_img)
def test_plantcv_roi_rectangle():
    """roi.rectangle yields a single 4-corner ROI contour in every debug mode."""
    # Test cache directory
    cache_dir = os.path.join(TEST_TMPDIR, "test_plantcv_roi_rectangle")
    os.mkdir(cache_dir)
    # Read in test RGB image
    rgb_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_COLOR))
    # Test with debug = "print"
    pcv.params.debug = "print"
    pcv.params.debug_outdir = cache_dir
    _, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    _, _ = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
    # Test with debug = None
    pcv.params.debug = None
    roi_contour, roi_hierarchy = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=rgb_img)
    # Assert the contours and hierarchy lists contain only the ROI
    assert np.shape(roi_contour) == (1, 4, 1, 2)
def test_plantcv_roi_rectangle_grayscale_input():
    """roi.rectangle also accepts a grayscale image and returns the 4-corner ROI."""
    # Read in a test grayscale image
    gray_img = cv2.imread(os.path.join(TEST_DATA, TEST_INPUT_GRAY), -1)
    # Test with debug = "plot"
    pcv.params.debug = "plot"
    roi_contour, roi_hierarchy = pcv.roi.rectangle(x=100, y=100, h=500, w=500, img=gray_img)
    # Assert the contours and hierarchy lists contain only the ROI
    # (the original assertion line was garbled/truncated; reconstructed to match
    # the RGB variant of this test above)
    assert np.shape(roi_contour) == (1, 4, 1, 2)
import numpy as np
import os
import random
import cv2
def load_imgpath_labels(filename, labels_num=1, shuffle=True):
    """Parse a CSV-style annotation file into image paths and labels.

    Each line has the form ``path,label[,label2,...]``.

    Args:
        filename: path to the annotation file.
        labels_num: number of label columns per line; 1 yields one int per
            image, >1 yields a list of ints per image.
        shuffle: randomly shuffle the line order before parsing.

    Returns:
        Tuple ``(imgpath, labels)`` of numpy arrays with equal length.
    """
    imgpath = []
    labels = []
    with open(filename) as f:
        lines_list = f.readlines()
        if shuffle:
            random.shuffle(lines_list)
        for lines in lines_list:
            line = lines.rstrip().split(',')
            if labels_num == 1:
                label = int(line[1])
            else:
                # BUG FIX: label was initialized to None, so label.append()
                # raised AttributeError whenever labels_num > 1.
                label = [int(line[i + 1]) for i in range(labels_num)]
            imgpath.append(line[0])
            labels.append(label)
    return np.array(imgpath), np.array(labels)
def get_input_img(filename, input_size=32):
img = cv2.imread(filename)
img = cv2.resize(img, (input_size, input_size))
img = | np.array(img, dtype=np.float32) | numpy.array |
#!/Users/bernardroesler/anaconda3/envs/insight/bin/python3
#==============================================================================
# File: run_kstest.py
# Created: 07/08/2018, 19:02
# Author: <NAME>
#
"""
Description:
"""
#==============================================================================
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from cycler import cycler
from scipy.stats import norm, uniform, kstest
np.random.seed(56)
plt.ion()
def make_kstest_plots(dist, compare='norm'):
fig = plt.figure(1)
plt.clf()
ax = plt.gca()
n = np.array([int(10**i) for i in range(7)])
n = np.hstack((n, 3*n))
n.sort()
D = | np.zeros(n.size) | numpy.zeros |
import h5py
import mpi_init
import subfind_data
import cosmology
import tree_build
import numpy as np
from scipy.interpolate import interp1d
def compute_accretion_rates(mpi, path, datasets, data, snap=99, mcut=1.0e12):
"""
Compute the accretion rate based on different mass estimates
"""
if not mpi.Rank:
print(" > Examining: {0}\n > Snap: {1:03d}".format(path, snap), flush=True)
# First we need to find the halos of interest
subfind_table = subfind_data.build_table(mpi, sim=path, snap=snap)
subfind_table.select_halos(mpi, cut=mcut)
if not mpi.Rank:
print(" > Found {0:d} halo(s)".format(len(subfind_table.tags)), flush=True)
# Now rebuild the trees for those halos
if not mpi.Rank:
print(" > Building merger tree for halos...", flush=True)
Mtrees = tree_build.trees(mpi, path, subfind_table, snap)
Mtrees.build_branches(mpi)
# Initialize cosmology class instance
cosmo = cosmology.cosmology(
subfind_table.hub, subfind_table.omega_m, subfind_table.omega_L
)
# Age of Universe at snapshots
if not mpi.Rank:
print(" > Computing mass accretion rates", flush=True)
age_Gyr = cosmo.age(Mtrees.zred)
# Now compute accretion rates for halos
tdyn_500c_Gyr = cosmo.t_dynamic_Gyr(Mtrees.zred, delta=500.0, mode="CRIT")
tdyn_200c_Gyr = cosmo.t_dynamic_Gyr(Mtrees.zred, delta=200.0, mode="CRIT")
tdyn_200m_Gyr = cosmo.t_dynamic_Gyr(Mtrees.zred, delta=200.0, mode="MEAN")
tdyn_vir_Gyr = cosmo.t_dynamic_Gyr(Mtrees.zred, mode="VIR")
# Compute age of Universe one dynamical time ago for each snapshot
dt_500c_Gyr = age_Gyr - tdyn_500c_Gyr
dt_200c_Gyr = age_Gyr - tdyn_200c_Gyr
dt_200m_Gyr = age_Gyr - tdyn_200m_Gyr
dt_vir_Gyr = age_Gyr - tdyn_vir_Gyr
# Delta log(a) of a dynamical time for all snapshots
aexp_int = interp1d(age_Gyr, Mtrees.aexp, fill_value="extrapolate")
Delta_lgAexp_500c = np.log(Mtrees.aexp) - np.log(aexp_int(dt_500c_Gyr))
Delta_lgAexp_200c = np.log(Mtrees.aexp) - np.log(aexp_int(dt_200c_Gyr))
Delta_lgAexp_200m = np.log(Mtrees.aexp) - np.log(aexp_int(dt_200m_Gyr))
Delta_lgAexp_vir = np.log(Mtrees.aexp) - np.log(aexp_int(dt_vir_Gyr))
# Now loop over haloes computing Delta log(M) -- with appropriate mass definition
Delta_lgM500c = np.zeros(Mtrees.M500c.shape, dtype=np.float)
Delta_lgM200c = np.zeros(Mtrees.M200c.shape, dtype=np.float)
Delta_lgM200m = np.zeros(Mtrees.M200m.shape, dtype=np.float)
Delta_lgMvir = np.zeros(Mtrees.Mvir.shape, dtype=np.float)
for j in range(0, len(Mtrees.index), 1):
lgM500c_int = interp1d(
age_Gyr, np.log(Mtrees.M500c[j]), fill_value="extrapolate"
)
lgM200c_int = interp1d(
age_Gyr, np.log(Mtrees.M200c[j]), fill_value="extrapolate"
)
lgM200m_int = interp1d(
age_Gyr, | np.log(Mtrees.M200m[j]) | numpy.log |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import time
import json
import numpy as np
import numba as nb
from enum import Enum
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Core.snapshot_pf_data import compile_snapshot_circuit
from GridCal.Engine.Simulations.LinearFactors.linear_analysis import LinearAnalysis, make_worst_contingency_transfer_limits
from GridCal.Engine.Simulations.driver_types import SimulationTypes
from GridCal.Engine.Simulations.result_types import ResultTypes
from GridCal.Engine.Simulations.results_table import ResultsTable
from GridCal.Engine.Simulations.results_template import ResultsTemplate
from GridCal.Engine.Simulations.driver_template import DriverTemplate
########################################################################################################################
# Optimal Power flow classes
########################################################################################################################
class AvailableTransferMode(Enum):
    """How the exchanged power shift is distributed among buses (see compute_alpha's mode)."""
    Generation = 0         # shift generation proportionally to current generated power
    InstalledPower = 1     # shift generation proportionally to installed capacity
    Load = 2               # shift load proportionally to current consumption
    GenerationAndLoad = 3  # shift all bus injections (generation and load) together
@nb.njit()
def compute_alpha(ptdf, P0, Pinstalled, idx1, idx2, bus_types, dT=1.0, mode=0):
    """
    Compute every branch's sensitivity (alpha) to a power exchange of dT
    between the sending region (idx1) and the receiving region (idx2).
    :param ptdf: Power transfer distribution factors (n-branch, n-bus)
    :param P0: all bus injections [p.u.]
    :param Pinstalled: installed generation power per bus [p.u.]
    :param idx1: bus indices of the sending region
    :param idx2: bus indices of the receiving region
    :param bus_types: Array of bus types {1: pq, 2: pv, 3: slack}
    :param dT: Exchange amount
    :param mode: Type of power shift
                 0: shift generation based on the current generated power
                 1: shift generation based on the installed power
                 2: shift load
                 3 (or else): shift using generation and load
    :return: Exchange sensitivity vector for all the lines
    """
    nbr = ptdf.shape[0]
    nbus = ptdf.shape[1]
    # declare the bus injections increment due to the transference
    dP = np.zeros(nbus)
    if mode == 0:  # move the generators based on the generated power --------------------
        # set the sending power increment proportional to the current power (Area 1)
        n1 = 0.0
        for i in idx1:
            if bus_types[i] == 2 or bus_types[i] == 3:  # it is a PV or slack node
                n1 += P0[i]
        for i in idx1:
            if bus_types[i] == 2 or bus_types[i] == 3:  # it is a PV or slack node
                dP[i] = dT * P0[i] / abs(n1)
        # set the receiving power increment proportional to the current power (Area 2)
        n2 = 0.0
        for i in idx2:
            if bus_types[i] == 2 or bus_types[i] == 3:  # it is a PV or slack node
                n2 += P0[i]
        for i in idx2:
            if bus_types[i] == 2 or bus_types[i] == 3:  # it is a PV or slack node
                dP[i] = -dT * P0[i] / abs(n2)
    elif mode == 1:  # move the generators based on the installed power --------------------
        # set the sending power increment proportional to the installed power (Area 1)
        n1 = 0.0
        for i in idx1:
            if bus_types[i] == 2 or bus_types[i] == 3:  # it is a PV or slack node
                n1 += Pinstalled[i]
        for i in idx1:
            if bus_types[i] == 2 or bus_types[i] == 3:  # it is a PV or slack node
                dP[i] = dT * Pinstalled[i] / abs(n1)
        # set the receiving power increment proportional to the installed power (Area 2)
        n2 = 0.0
        for i in idx2:
            if bus_types[i] == 2 or bus_types[i] == 3:  # it is a PV or slack node
                n2 += Pinstalled[i]
        for i in idx2:
            if bus_types[i] == 2 or bus_types[i] == 3:  # it is a PV or slack node
                dP[i] = -dT * Pinstalled[i] / abs(n2)
    elif mode == 2:  # move the load ------------------------------------------------------
        # set the sending power increment proportional to the current load (Area 1)
        n1 = 0.0
        for i in idx1:
            if bus_types[i] == 1:  # it is a PQ (load) node
                n1 += P0[i]
        for i in idx1:
            if bus_types[i] == 1:  # it is a PQ (load) node
                dP[i] = dT * P0[i] / abs(n1)
        # set the receiving power increment proportional to the current load (Area 2)
        n2 = 0.0
        for i in idx2:
            if bus_types[i] == 1:  # it is a PQ (load) node
                n2 += P0[i]
        for i in idx2:
            if bus_types[i] == 1:  # it is a PQ (load) node
                dP[i] = -dT * P0[i] / abs(n2)
    else:  # move all of it -----------------------------------------------------------------
        # set the sending power increment proportional to the current power
        n1 = 0.0
        for i in idx1:
            n1 += P0[i]
        for i in idx1:
            dP[i] = dT * P0[i] / abs(n1)
        # set the receiving power increment proportional to the current power
        n2 = 0.0
        for i in idx2:
            n2 += P0[i]
        for i in idx2:
            dP[i] = -dT * P0[i] / abs(n2)
    # ----------------------------------------------------------------------------------------
    # compute the line flow increments due to the exchange increment dT in MW
    dflow = ptdf.dot(dP)
    # compute the sensitivity
    alpha = dflow / dT
    return alpha
@nb.njit()
def compute_atc(br_idx, ptdf, lodf, alpha, flows, rates, contingency_rates, threshold=0.005):
    """
    Compute all lines' Available Transfer Capacity (ATC) in "N" and "N-1".
    :param br_idx: array of branch indices to analyze
    :param ptdf: Power transfer distribution factors (n-branch, n-bus)
                 (accepted for interface symmetry; not referenced in the body)
    :param lodf: Line outage distribution factors (n-branch, n-outage branch)
    :param alpha: Branch sensitivities to the exchange [p.u.]
    :param flows: branches power injected at the "from" side [MW]
    :param rates: all branches rates vector
    :param contingency_rates: all branches contingency rates vector
    :param threshold: value that determines if a line is studied for the ATC calculation
    :return:
             beta_mat: Matrix of beta values (branch, contingency_branch)
             beta_used: vector of actual beta value used for each branch (n-branch)
             atc_n: vector of ATC values in "N" (n-branch)
             atc_mc: vector of ATC values in "N-1" (n-branch)
             atc_final: vector of ATC in "N" or "N-1" whatever is more limiting (n-branch)
             atc_limiting_contingency_branch: most limiting contingency branch index vector (n-branch)
             atc_limiting_contingency_flow: most limiting contingency flow vector (n-branch)
    """
    nbr = len(br_idx)
    # explore the ATC
    atc_n = np.zeros(nbr)
    atc_mc = np.zeros(nbr)
    atc_final = np.zeros(nbr)
    beta_mat = np.zeros((nbr, nbr))
    beta_used = np.zeros(nbr)
    atc_limiting_contingency_branch = np.zeros(nbr)
    atc_limiting_contingency_flow = np.zeros(nbr)
    for im, m in enumerate(br_idx):  # for each branch
        if abs(alpha[m]) > threshold and abs(flows[m]) < rates[m]:  # if the branch is relevant enough for the ATC...
            # compute the ATC in "N"
            if alpha[m] == 0:
                atc_final[im] = np.inf
            elif alpha[m] > 0:
                atc_final[im] = (rates[m] - flows[m]) / alpha[m]
            else:
                atc_final[im] = (-rates[m] - flows[m]) / alpha[m]
            # remember the ATC in "N"
            atc_n[im] = atc_final[im]
            # set to the current branch, since we don't know if there will be any contingency that make the ATC worse
            atc_limiting_contingency_branch[im] = m
            # explore the ATC in "N-1"
            for ic, c in enumerate(br_idx):  # for each contingency
                # compute the exchange sensitivity in contingency conditions
                beta_mat[im, ic] = alpha[m] + lodf[m, c] * alpha[c]
                if m != c:
                    # compute the contingency flow
                    contingency_flow = flows[m] + lodf[m, c] * flows[c]
                    # set the default values (worst contingency by itself, not comparing with the base situation)
                    if abs(contingency_flow) > abs(atc_limiting_contingency_flow[im]):
                        atc_limiting_contingency_flow[im] = contingency_flow  # default
                        atc_limiting_contingency_branch[im] = c
                    # now here, do compare with the base situation
                    if abs(beta_mat[im, ic]) > threshold and abs(contingency_flow) <= contingency_rates[m]:
                        # compute the ATC in "N-1"
                        if beta_mat[im, ic] == 0:
                            atc_mc[im] = np.inf
                        elif beta_mat[im, ic] > 0:
                            atc_mc[im] = (contingency_rates[m] - contingency_flow) / beta_mat[im, ic]
                        else:
                            atc_mc[im] = (-contingency_rates[m] - contingency_flow) / beta_mat[im, ic]
                        # refine the ATC to the most restrictive value every time
                        if abs(atc_mc[im]) < abs(atc_final[im]):
                            atc_final[im] = atc_mc[im]
                            beta_used[im] = beta_mat[im, ic]
                            atc_limiting_contingency_flow[im] = contingency_flow
                            atc_limiting_contingency_branch[im] = c
    return beta_mat, beta_used, atc_n, atc_mc, atc_final, atc_limiting_contingency_branch, atc_limiting_contingency_flow
class AvailableTransferCapacityResults(ResultsTemplate):
def __init__(self, n_bus, br_names, bus_names, bus_types, bus_idx_from, bus_idx_to, br_idx):
"""
:param n_bus:
:param br_names:
:param bus_names:
:param bus_types:
:param bus_idx_from:
:param bus_idx_to:
:param br_idx:
"""
ResultsTemplate.__init__(self,
name='ATC Results',
available_results=[ResultTypes.AvailableTransferCapacity,
ResultTypes.NetTransferCapacity,
ResultTypes.AvailableTransferCapacityN,
ResultTypes.AvailableTransferCapacityAlpha,
ResultTypes.AvailableTransferCapacityBeta,
ResultTypes.AvailableTransferCapacityReport
],
data_variables=['alpha',
'beta_mat',
'beta',
'atc',
'atc_n',
'atc_limiting_contingency_branch',
'atc_limiting_contingency_flow',
'base_flow',
'rates',
'contingency_rates',
'report',
'report_headers',
'report_indices',
'branch_names',
'bus_names',
'bus_types',
'bus_idx_from',
'bus_idx_to',
'br_idx'])
self.n_br = len(br_idx)
self.n_bus = n_bus
self.branch_names = np.array(br_names, dtype=object)
self.bus_names = bus_names
self.bus_types = bus_types
self.bus_idx_from = bus_idx_from
self.bus_idx_to = bus_idx_to
self.br_idx = br_idx
# stores the worst transfer capacities (from to) and (to from)
self.rates = np.zeros(self.n_br)
self.contingency_rates = np.zeros(self.n_br)
self.base_exchange = 0
self.alpha = np.zeros(self.n_br)
self.atc = np.zeros(self.n_br)
self.atc_n = np.zeros(self.n_br)
self.atc_mc = | np.zeros(self.n_br) | numpy.zeros |
#scipy.signal.istft example
#https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.istft.html
#
import numpy as np #added by author
from scipy import signal
import matplotlib.pyplot as plt
#Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by 0.001 V**2/Hz of white noise sampled at 1024 Hz.
#(Japanese original of the line above) Generate a test signal: a 2 Vrms, 50 Hz sine wave corrupted by 0.001 V**2/Hz white noise, sampled at 1024 Hz
fs = 1024
N = 10*fs
nperseg = 64 #2048 #1024 #128 #256 #512
amp = 2 * | np.sqrt(2) | numpy.sqrt |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 09:06:52 2021
@author: yohanna
"""
import itertools
import numpy as np
import config
p = config.setup()
import math
def nCr(n, k):
    """
    Binomial coefficient C(n, k) = n! / (k! * (n-k)!), for 0 <= k <= n.

    Uses math.comb instead of the factorial formula: identical results on
    the valid range, and it returns 0 for k > n instead of raising from a
    negative factorial.
    """
    return math.comb(n, k)
def DCP_rs(data, A_true, A_est, M_gt, Z):
from scipy import stats
'''
Performance evaluation (D_cp distance over all samples)
Based on this paper:(DCP calculation for noisy data through robust statistics - Not used in our paper)
https://feb.kuleuven.be/public/u0017833/PDF-FILES/Croux_Dehon5.pdf
Arguments:
data : Input data;
{train_data, test_data}
A_true : hidden true parameters A_true = (A, sigma_y);
A_est : our estimates A_est = (A, sigma_hat);
Returns:
DKL = sum(DCP) KL divergence.
'''
n = p.n
DCP = np.array([])
for child in range(n):
parents = [list(pa) for pa in (np.nonzero(A_true[:, child]))]
parents = list(itertools.chain(*parents))
''' Calculate M: covariance matrix among parents'''
M = M_gt[np.ix_(parents, parents)]
''' Calculate a_true and a_est'''
index_true = A_true[:, child]
index_est = A_est[:, child]
a_true = index_true[index_true != 0]
a_est = index_est[index_est != 0]
''' delta = [a_true - a_est]'''
delta = a_true - a_est
''' Calculate sigma_y (true)'''
if len(a_est) == 0:
points = data[:, child]
a = np.abs(data[:, child])
elif len(a_est) == 1:
points = data[:, child] - a_est * np.transpose(data[:, parents])
points = np.squeeze(points)
a = np.abs(data[:, child] - a_est * np.transpose(data[:, parents]))
elif len(a_est) > 1:
points = data[:, child] - np.matmul(np.array(a_est), np.transpose(data[:, parents]))
a = np.abs(data[:, child] - np.matmul(np.array(a_est), np.transpose(data[:, parents])))
dist = []
for i in list(range(len(points))):
for j in list(range(i)):
d = np.abs(points[i] - points[j])
dist.append(d)
dist_new = np.sort(dist)
index = nCr(np.round(len(points)/2) + 1, 2)
smallest_dist = dist_new[index]
sigma_hat = 2.219 *smallest_dist
sigma_hat2 = np.median(a)/0.674
sigma_y = np.sqrt(np.diag(Z)[child])
if sigma_y == 0 or sigma_hat == 0:
DCP = 0
else:
''' DCP can be calculated as follows: '''
if len(delta) == 1:
DMD = (delta * M * delta)/(2 * np.square(sigma_hat))
else:
DMD = np.matmul(np.matmul(np.transpose(
delta), M), delta)/(2 * np.square(sigma_hat))
dcp = np.log(sigma_hat/sigma_y) + (np.square(sigma_y) - np.square(sigma_hat))/(2*np.square(sigma_hat)) + DMD
DCP = | np.append(DCP, dcp) | numpy.append |
# general libraries
import numpy as np
from scipy import fftpack
from .matching_tools import \
get_integer_peak_location, reposition_templates_from_center, \
make_templates_same_size
from .matching_tools_frequency_filters import \
raised_cosine, thresh_masking, normalize_power_spectrum, gaussian_mask
# general frequency functions
def create_complex_DCT(I, C_c, C_s):  # wip
    """Combine cosine/sine basis products into one complex DCT-style array.

    Computes (C_cc - C_ss) - 1j*(C_cs + C_sc), where each term is the
    element-wise product of `I` with the supplied basis matrices, e.g.
    C_cc = C_c * I * C_c.T.

    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities
    C_c, C_s : np.array, size=(m,n)
        cosine and sine basis matrices, see get_cosine_matrix

    Returns
    -------
    C : np.array, size=(m,n), dtype=complex
        complex transform of I
    """
    Ct_c, Ct_s = C_c.T, C_s.T
    real_part = C_c * I * Ct_c - C_s * I * Ct_s
    imag_part = C_c * I * Ct_s + C_s * I * Ct_c
    return real_part - 1j * imag_part
def create_complex_fftpack_DCT(I):
    """Build the DCT-based complex transform {(C_cc - C_ss) - j(C_cs + C_sc)}.

    The four terms are the separable DCT-II/DST-II combinations of `I`
    along both axes, computed with scipy.fftpack.

    Parameters
    ----------
    I : np.array, size=(m,n)
        array with intensities

    Returns
    -------
    C : np.array, size=(m,n), dtype=complex
        complex DCT-based transform of I
    """
    # transform the rows once, then reuse for both column transforms
    dct_rows = fftpack.dct(I, type=2, axis=0)
    dst_rows = fftpack.dst(I, type=2, axis=0)

    C_cc = fftpack.dct(dct_rows, type=2, axis=1)
    C_ss = fftpack.dst(dst_rows, type=2, axis=1)
    C_cs = fftpack.dct(dst_rows, type=2, axis=1)
    C_sc = fftpack.dst(dct_rows, type=2, axis=1)
    return (C_cc - C_ss) - 1j * (C_cs + C_sc)
def get_cosine_matrix(I, N=None):  # wip
    """Create the cosine basis matrix of the (orthonormal) DCT-II.

    C[k,n] = w(k) * cos(pi*k*(n+1/2)/L) with w(0)=sqrt(1/L) and
    w(k)=sqrt(2/L) otherwise, so that C @ C.T equals the identity
    when N==L.

    Parameters
    ----------
    I : np.array, size=(L,L)
        array with intensities; only its size is used
    N : integer, optional
        number of sample points per frequency, defaults to L

    Returns
    -------
    C : np.array, size=(L,L)
        cosine basis matrix (columns beyond N stay zero)

    See Also
    --------
    get_sine_matrix, create_complex_DCT
    """
    (L, _) = I.shape
    if N is None:
        N = L
    C = np.zeros((L, L))
    # NOTE: previous version used np.divide with an integer `out` array,
    # which raises a casting error; L>=1 here so plain float division is safe
    for k in range(L):
        # orthonormal scaling of the DCT-II basis
        w = np.sqrt(1 / L) if k == 0 else np.sqrt(2 / L)
        for n in range(N):
            C[k, n] = w * np.cos(np.pi * k * (n + .5) / L)
    return C
def get_sine_matrix(I, N=None):  # wip
    """Create the sine basis matrix of the DST-II style transform.

    C[k,n] = sqrt(2/L) * sin(pi*k*(n+1/2)/L) for k>0; the k=0 row is
    the constant sqrt(1/L) (mirroring get_cosine_matrix, since sin(0)=0
    would make the row degenerate).

    Parameters
    ----------
    I : np.array, size=(L,L)
        array with intensities; only its size is used
    N : integer, optional
        number of sample points per frequency, defaults to L

    Returns
    -------
    C : np.array, size=(L,L)
        sine basis matrix (columns beyond N stay zero)

    See Also
    --------
    get_cosine_matrix, create_complex_DCT
    """
    (L, _) = I.shape
    if N is None:
        # make a square matrix
        N = L
    C = np.zeros((L, L))
    # NOTE: previous version used np.divide with an integer `out` array,
    # which raises a casting error; L>=1 here so plain float division is safe
    for k in range(L):
        w = np.sqrt(1 / L) if k == 0 else np.sqrt(2 / L)
        for n in range(N):
            if k == 0:
                C[k, n] = w
            else:
                C[k, n] = w * np.sin(np.pi * k * (n + .5) / L)
    return C
def upsample_dft(Q, up_m=0, up_n=0, upsampling=1, \
                 i_offset=0, j_offset=0):
    """Upsample a (region of a) Fourier spectrum via matrix-multiplied DFTs.

    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        cross-spectrum
    up_m, up_n : integer, default=0
        output size; 0 means "same as the input"
    upsampling : integer, default=1
        upsampling factor
    i_offset, j_offset : float, default=0
        sub-pixel offset of the region of interest

    Returns
    -------
    Q_up : np.array, size=(up_m,up_n), dtype=complex
        upsampled transform

    References
    ----------
    .. [1] Guizar-Sicairos, et al. "Efficient subpixel image registration
       algorithms", Optics letters, vol. 33 pp.156--158, 2008.
    """
    (m,n) = Q.shape
    # m,n are plain Python ints from .shape — they have no .copy() method,
    # so the defaults are assigned directly
    if up_m==0:
        up_m = m
    if up_n==0:
        up_n = n

    # DFT kernels evaluated only at the requested output samples
    kernel_collumn = np.exp((1j*2*np.pi/(n*upsampling)) *\
                            ( np.fft.fftshift(np.arange(n) - \
                                              (n//2))[:,np.newaxis] )*\
                            ( np.arange(up_n) - j_offset ))
    kernel_row = np.exp((1j*2*np.pi/(m*upsampling)) *\
                        ( np.arange(up_m)[:,np.newaxis] - i_offset )*\
                        ( np.fft.fftshift(np.arange(m) - (m//2)) ))
    Q_up = np.matmul(kernel_row, np.matmul(Q,kernel_collumn))
    return Q_up
def pad_dft(Q, m_new, n_new):
    """Zero-pad a Fourier spectrum to a larger size.

    The old spectrum is centred inside the new array (in fftshifted
    configuration), then shifted back and rescaled by the size ratio.

    Parameters
    ----------
    Q : np.array, size=(m,n), dtype=complex
        Fourier spectrum
    m_new, n_new : integer
        size of the padded output

    Returns
    -------
    Q_new : np.array, size=(m_new,n_new), dtype=complex
        zero-padded spectrum
    """
    assert type(Q)==np.ndarray, ("please provide an array")
    m_old, n_old = Q.shape

    Q_shift = np.fft.fftshift(Q)  # zero frequency at the centre
    Q_pad = np.zeros((m_new, n_new), dtype=np.complex64)

    # offset between the two centres
    di = m_new // 2 - m_old // 2
    dj = n_new // 2 - n_old // 2

    # clip both the destination window and the source window so that
    # shrinking (negative offsets) is handled as well
    Q_pad[max(di, 0):min(di + m_old, m_new),
          max(dj, 0):min(dj + n_old, n_new)] = \
        Q_shift[max(-di, 0):min(-di + m_new, m_old),
                max(-dj, 0):min(-dj + n_new, n_old)]

    # back to natural ordering, with energy scaling
    return (np.fft.fftshift(Q_pad) * m_new * n_new) / (m_old * n_old)
# frequency/spectrum matching functions
def cosi_corr(I1, I2, beta1=.35, beta2=.50, m=1e-4):
    """ match two imagery through raised-cosine windowed phase correlation,
    following the COSI-Corr approach

    Parameters
    ----------
    I1 : np.array, size=(mt,nt), ndim={2,3}
        array with intensities (template)
    I2 : np.array, ndim={2,3}
        array with intensities (search space); when larger than I1 an
        integer refinement step is done to increase overlap
    beta1 : float, default=.35
        roll-off factor of the raised-cosine window applied to I1
    beta2 : float, default=.50
        roll-off factor of the raised-cosine window applied to I2
    m : float, default=1e-4
        relative threshold used for the spectral mask, see thresh_masking

    Returns
    -------
    Qn : np.array, dtype=complex
        normalized cross-spectrum
    WS : np.array
        threshold-based spectral weighting mask
    m0 : np.array, size=(2,), dtype=int
        integer displacement accumulated during the refinement step
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')
    mt,nt = I1.shape[0], I1.shape[1] # dimensions of the template
    # raised-cosine windows suppress edge effects in the spectra
    W1 = raised_cosine(np.zeros((mt,nt)), beta1)
    W2 = raised_cosine(np.zeros((mt,nt)), beta2)
    if I1.size==I2.size: # if templates are same size, no refinement is done
        tries = [0]
    else:
        tries = [0, 1]
    di,dj, m0 = 0,0,np.array([0, 0])
    for trying in tries: # implement refinement step to have more overlap
        if I1.ndim==3: # multi-spectral frequency stacking
            bands = I1.shape[2]
            I1sub,I2sub = reposition_templates_from_center(I1,I2,di,dj)
            for i in range(bands): # loop through all bands
                I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
                S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
                if i == 0:
                    Q = (W1*S1)*np.conj((W2*S2))
                else:
                    # running average of the per-band cross-spectra
                    Q_b = (W1*S1)*np.conj((W2*S2))
                    Q = (1/(i+1))*Q_b + (i/(i+1))*Q
        else:
            I1sub,I2sub = reposition_templates_from_center(I1,I2,di,dj)
            S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
            Q = (W1*S1)*np.conj((W2*S2))
        # transform back to spatial domain
        C = np.real(np.fft.fftshift(np.fft.ifft2(Q)))
        ddi, ddj,_,_ = get_integer_peak_location(C)
        m_int = np.round(np.array([ddi, ddj])).astype(int)
        # stop refining once the residual displacement is sub-pixel
        if np.amax(abs(np.array([ddi, ddj])))<.5:
            break
        else:
            di,dj = m_int[0], m_int[1]
        m0[0] += di
        m0[1] += dj
    # NOTE(review): WS is built from S1 of the last processed band only;
    # for multi-spectral input this may be intentional — confirm
    WS = thresh_masking(S1, m)
    Qn = normalize_power_spectrum(Q)
    return Qn, WS, m0
def cosine_corr(I1, I2):
    """ match two imagery through discrete cosine transformation

    Parameters
    ----------
    I1 : np.array, size=(m,n), dtype=float
        array with intensities
    I2 : np.array, size=(m,n), dtype=float
        array with intensities

    Returns
    -------
    C : np.array, size=(m,n), dtype=float
        displacement surface (the normalized cross-spectrum transformed
        back to the spatial domain)

    See Also
    --------
    create_complex_fftpack_DCT, sign_only_corr

    References
    ----------
    .. [1] Li, et al. "DCT-based phase correlation motion estimation",
       IEEE international conference on image processing, vol. 1, 2004.
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    # NOTE: the cosine/sine basis matrices (get_cosine_matrix/get_sine_matrix)
    # are no longer built here — only the fftpack-based transform is used,
    # so constructing them was O(L^2) Python-loop work with an unused result

    if I1.ndim==3: # multi-spectral frequency stacking
        bands = I1.shape[2]
        I1sub,I2sub = make_templates_same_size(I1,I2)

        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]

            C1 = create_complex_fftpack_DCT(I1bnd)
            C2 = create_complex_fftpack_DCT(I2bnd)
            if i == 0:
                Q = C1*np.conj(C2)
            else:
                # running average of the per-band cross-spectra
                Q_b = (C1)*np.conj(C2)
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        C1 = create_complex_fftpack_DCT(I1sub)
        C2 = create_complex_fftpack_DCT(I2sub)
        Q = (C1)*np.conj(C2)

    # normalize and transform back to the spatial domain
    Q = normalize_power_spectrum(Q)
    C = np.fft.fftshift(np.real(np.fft.ifft2(Q)))
    return C
def masked_cosine_corr(I1, I2, M1, M2): # wip
    '''
    work in progress: masked matching in the cosine (DCT) domain.

    The idea is to estimate the DCT coefficients of I1 only from the
    pixels flagged valid in M1, via a least-squares fit on a reduced
    cosine basis, before correlating with I2.

    Parameters
    ----------
    I1, I2 : np.array
        arrays with intensities
    M1, M2 : np.array
        masks, cast to bool; True marks usable pixels

    Returns
    -------
    Q : np.array, dtype=complex
        cross-spectrum (DCT-based)
    '''
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')
    assert type(M1)==np.ndarray, ('please provide an array')
    assert type(M2)==np.ndarray, ('please provide an array')

    M1, M2 = M1.astype(dtype=bool), M2.astype(dtype=bool)

    # construct cosine and sine basis matrices
    Cc, Cs = get_cosine_matrix(I1), get_sine_matrix(I1)

    # look at how many frequencies can be estimated with this data
    (m,n) = M1.shape
    X1 = np.ones((m,n), dtype=bool)
    # number of estimable frequencies is limited by the valid-pixel count
    min_span = int(np.floor(np.sqrt(min(np.sum(M1), np.sum(M2)))))
    X1[min_span:,:] = False
    X1[:,min_span:] = False

    # assumes I1 holds 8-bit intensities, rescaled to [-.5,+.5] — TODO confirm
    y = (I1[M1].astype(dtype=float)/255)-.5

    # build matrix
    Ccc = np.kron(Cc,Cc)
    # shrink size
    Ccc = Ccc[M1.flatten(),:] # remove rows, as these are missing
    Ccc = Ccc[:,X1.flatten()] # remove collumns, since these can't be estimated

    # least-squares estimate of the reduced set of DCT coefficients
    Icc = np.linalg.lstsq(Ccc, y, rcond=None)[0]
    Icc = np.reshape(Icc, (min_span, min_span))
    # NOTE(review): the two lines below compute values that are never used
    iCC = Ccc.T*y
    np.reshape(Ccc.T*y, (min_span, min_span))

    if I1.ndim==3: # multi-spectral frequency stacking
        (mt,nt,bt) = I1.shape
        (ms,ns,bs) = I2.shape
        # crop I2 towards the template size
        md, nd = np.round((ms-mt)/2).astype(int), np.round((ns-nt)/2).astype(int)

        for i in range(bt): # loop through all bands
            I1sub = I1[:,:,i]
            I2sub = I2[md:-md, nd:-nd,i]

            C1 = create_complex_DCT(I1sub, Cc, Cs)
            C2 = create_complex_DCT(I2sub, Cc, Cs)
            if i == 0:
                Q = C1*np.conj(C2)
            else:
                # running average of the per-band cross-spectra
                Q_b = (C1)*np.conj(C2)
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)

        C1 = create_complex_DCT(I1sub, Cc, Cs)
        C2 = create_complex_DCT(I2sub, Cc, Cs)
        Q = (C1)*np.conj(C2)
    return Q
def phase_only_corr(I1, I2):
    """ match two imagery through phase only correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    I2 : np.array, size=(m,n), ndim={2,3}
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n), dtype=complex
        cross-spectrum

    See Also
    --------
    phase_corr, symmetric_phase_corr, amplitude_comp_corr

    Notes
    -----
    The matching equations are as follows:

    .. math:: \mathbf{S}_1, \mathbf{S}_2 = \mathcal{F}[\mathbf{I}_1], \mathcal{F}[\mathbf{I}_2]
    .. math:: \mathbf{W} = 1 / ||\mathbf{S}_2||
    .. math:: \mathbf{Q}_{12} = \mathbf{S}_1 [\mathbf{W}\mathbf{S}_2]^{\star}

    where :math:`\mathcal{F}` denotes the Fourier transform and :math:`\star` a complex conjugate operation

    References
    ----------
    .. [1] Horner & Gianino, "Phase-only matched filtering", Applied optics,
       vol. 23(6) pp.812--816, 1984.
    .. [2] Kumar & Juday, "Design of phase-only, binary phase-only, and complex
       ternary matched filters with increased signal-to-noise ratios for
       colored noise", Optics letters, vol. 16(13) pp. 1025--1027, 1991.

    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair
    >>> from .matching_tools import get_integer_peak_location

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = phase_only_corr(im1, im2)
    >>> C = np.fft.ifft2(Q)
    >>> di,dj,_,_ = get_integer_peak_location(C)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(tj, dj, atol=1))
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    if (I1.ndim==3) or (I2.ndim==3): # multi-spectral frequency stacking
        I1sub,I2sub = make_templates_same_size(I1,I2)
        bands = I1.shape[2]
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            # weight by the inverse amplitude of the reference SPECTRUM,
            # i.e. W = 1/||S2|| as in [1] (previously the spatial image
            # I2 was mistakenly used here)
            S2_abs = np.abs(S2)
            W2 = np.divide(1, S2_abs,
                           out=np.zeros_like(S2_abs), where=S2_abs!=0)
            if i == 0:
                Q = (S1)*np.conj((W2*S2))
            else:
                # running average of the per-band cross-spectra
                Q_b = (S1)*np.conj((W2*S2))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        S2_abs = np.abs(S2)
        W2 = np.divide(1, S2_abs,
                       out=np.zeros_like(S2_abs), where=S2_abs!=0)
        Q = (S1)*np.conj((W2*S2))
    return Q
def projected_phase_corr(I1, I2, M1=np.array(()), M2=np.array(())):
    """ match two imagery through the correlation of their projections

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim=2
        array with intensities
    I2 : np.array, size=(m,n), ndim=2
        array with intensities
    M1 : np.array, size=(m,n), ndim=2, dtype={bool,float}
        array with mask, defaults to all ones
    M2 : np.array, size=(m,n), ndim=2, dtype={bool,float}
        array with mask, defaults to all ones

    Returns
    -------
    C : np.array, size=(m,n), real
        displacement surface, combined from the two 1-D correlations

    References
    ----------
    .. [1] Zhang et al. "An efficient subpixel image registration based on the
       phase-only correlations of image projections", IEEE proceedings of the
       10th international symposium on communications and information
       technologies, 2010.
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    I1sub,I2sub = make_templates_same_size(I1,I2)

    if M1.size==0 : M1 = np.ones_like(I1sub)
    if M2.size==0 : M2 = np.ones_like(I1sub)

    def _projected_spectrum(I, M, axis=0):
        # collapse the masked image onto one axis, window it, transform it
        if axis==1 : I, M = I.T, M.T
        profile = np.sum(I*M, axis=1)
        windowed = profile*np.hamming(profile.size)
        S = np.fft.fft(windowed)
        return S.T if axis==1 else S

    # per-axis cross-spectra, back-transformed to correlation profiles
    profiles = []
    for ax in (0, 1):
        Sa = _projected_spectrum(I1sub, M1, axis=ax)
        Sb = _projected_spectrum(I2sub, M2, axis=ax)
        Q12 = Sa*np.conj(Sb)  # 1-D cross-spectrum (no normalization applied)
        profiles.append(np.fft.fftshift(np.real(np.fft.ifft(Q12))))

    # combine the two 1-D correlation profiles into a surface
    C = np.sqrt(np.outer(profiles[0], profiles[1]))
    return C
def sign_only_corr(I1, I2): # to do
    """ match two imagery through sign-only correlation of their DCTs

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    I2 : np.array, size=(m,n), ndim={2,3}
        array with intensities

    Returns
    -------
    C : np.array, size=(m,n), real
        displacement surface

    See Also
    --------
    cosine_corr

    References
    ----------
    .. [1] Ito & Kiya, "DCT sign-only correlation with application to image
       matching and the relationship with phase-only correlation",
       IEEE international conference on acoustics, speech and signal
       processing, vol. 1, 2007.
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    if (I1.ndim==3) or (I2.ndim==3): # multi-spectral frequency stacking
        I1sub,I2sub = make_templates_same_size(I1,I2)
        bands = I1.shape[2]
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            # FIX: a trailing comma previously turned C1 into a 1-tuple
            C1 = np.sign(fftpack.dctn(I1bnd, type=2))
            C2 = np.sign(fftpack.dctn(I2bnd, type=2))
            if i == 0:
                Q = C1*np.conj(C2)
            else:
                # running average of the per-band sign cross-spectra
                Q_b = (C1)*np.conj(C2)
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        C1,C2 = fftpack.dctn(I1sub, type=2), fftpack.dctn(I2sub, type=2)
        C1,C2 = np.sign(C1), np.sign(C2)
        Q = (C1)*np.conj(C2)
    # transform back to the spatial domain (the four unused idct/idst
    # variants computed here before have been removed)
    C = fftpack.idctn(Q,type=1)
    return C
def symmetric_phase_corr(I1, I2):
    """ match two imagery through symmetric phase only correlation (SPOF)
    also known as Smoothed Coherence Transform (SCOT)

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    I2 : np.array, size=(m,n), ndim={2,3}
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n), dtype=complex
        cross-spectrum

    Notes
    -----
    The matching equations are as follows:

    .. math:: \mathbf{S}_1, \mathbf{S}_2 = \mathcal{F}[\mathbf{I}_1], \mathcal{F}[\mathbf{I}_2]
    .. math:: \mathbf{W} = 1 / \sqrt{||\mathbf{S}_1||||\mathbf{S}_2||}
    .. math:: \mathbf{Q}_{12} = \mathbf{S}_1 [\mathbf{W}\mathbf{S}_2]^{\star}

    where :math:`\mathcal{F}` denotes the Fourier transform and :math:`\star` a complex conjugate operation

    References
    ----------
    .. [1] Nikias & Petropoulou. "Higher order spectral analysis: a nonlinear
       signal processing framework", Prentice hall. pp.313-322, 1993.
    .. [2] Wernet. "Symmetric phase only filtering: a new paradigm for DPIV
       data processing", Measurement science and technology, vol.16 pp.601-618,
       2005.

    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair
    >>> from .matching_tools import get_integer_peak_location

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = symmetric_phase_corr(im1, im2)
    >>> C = np.fft.ifft2(Q)
    >>> di,dj,_,_ = get_integer_peak_location(C)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(tj, dj, atol=1))
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    if (I1.ndim==3) or (I2.ndim==3): # multi-spectral frequency stacking
        I1sub,I2sub = make_templates_same_size(I1,I2)
        bands = I1.shape[2]
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            # W = 1/sqrt(||S1|| ||S2||); "np.divided" was a typo that
            # raised an AttributeError here, and division is now guarded
            denom = np.sqrt(abs(S1))*np.sqrt(abs(S2))
            W2 = np.divide(1, denom, out=np.zeros_like(denom),
                           where=denom!=0)
            if i == 0:
                Q = (S1)*np.conj((W2*S2))
            else:
                # running average of the per-band cross-spectra
                Q_b = (S1)*np.conj((W2*S2))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        # weight from the SPECTRA as in the docstring (previously the
        # spatial intensities I1sub,I2sub were used)
        denom = np.sqrt(abs(S1))*np.sqrt(abs(S2))
        W2 = np.divide(1, denom, out=np.zeros_like(denom), where=denom!=0)
        Q = (S1)*np.conj((W2*S2))
    return Q
def amplitude_comp_corr(I1, I2, F_0=0.04):
    """ match two imagery through amplitude compensated phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    I2 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    F_0 : float, default=4e-2
        cut-off intensity in respect to maximum

    Returns
    -------
    Q : np.array, size=(m,n), dtype=complex
        cross-spectrum

    References
    ----------
    .. [1] Mu et al. "Amplitude-compensated matched filtering", Applied optics,
       vol. 27(16) pp. 3461-3463, 1988.

    Example
    -------
    >>> import numpy as np
    >>> from .matching_tools import get_integer_peak_location
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = amplitude_comp_corr(im1, im2)
    >>> C = np.fft.ifft2(Q)
    >>> di,dj,_,_ = get_integer_peak_location(C)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(tj, dj, atol=1))
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    def _compensated_weight(S2, F_0):
        """build W = 1/||S2|| below the cut-off, s_0/||S2||^2 above it"""
        S2_abs = np.abs(S2)
        s_0 = F_0 * np.amax(S2_abs)
        W = np.divide(1, S2_abs,
                      out=np.zeros_like(S2_abs), where=S2_abs!=0)
        A = np.divide(s_0, S2_abs**2,
                      out=np.zeros_like(S2_abs), where=S2_abs!=0)
        high = S2_abs > s_0
        # element-wise replacement (a full-array assignment to the masked
        # subset previously raised a shape error in the band branch)
        W[high] = A[high]
        return W

    if (I1.ndim==3) or (I2.ndim==3): # multi-spectral frequency stacking
        I1sub,I2sub = make_templates_same_size(I1,I2)
        bands = I1.shape[2]
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            # weight from the band's SPECTRUM (previously the whole
            # spatial stack I2sub was used here)
            W = _compensated_weight(S2, F_0)
            if i == 0:
                Q = (S1)*np.conj((W*S2))
            else:
                # running average of the per-band cross-spectra
                Q_b = (S1)*np.conj((W*S2))
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        W = _compensated_weight(S2, F_0)
        Q = (S1)*np.conj((W*S2))
    return Q
def robust_corr(I1, I2):
    """ match two imagery through fast robust correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim=2
        array with intensities
    I2 : np.array, size=(m,n), ndim=2
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n), dtype=complex
        cross-spectrum

    References
    ----------
    .. [1] Fitch et al. "Fast robust correlation", IEEE transactions on image
       processing vol. 14(8) pp. 1063-1073, 2005.
    .. [2] Essannouni et al. "Adjustable SAD matching algorithm using frequency
       domain" Journal of real-time image processing, vol.1 pp.257-265

    Example
    -------
    >>> import numpy as np
    >>> from .matching_tools import get_integer_peak_location
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = robust_corr(im1, im2)
    >>> C = np.fft.ifft2(Q)
    >>> di,dj,_,_ = get_integer_peak_location(C)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(tj, dj, atol=1))
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    I1sub,I2sub = make_templates_same_size(I1,I2)

    # accumulate cross-spectra of complex exponentials over a small
    # range of robustness parameters p = 10^0, 10^.5
    Q = None
    for p in 10**np.arange(0,1,.5):
        amp = 1/p**(1/3)
        F1 = np.fft.fft2(amp * np.exp(1j*(2*p -1)*I1sub))
        F2 = np.fft.fft2(amp * np.exp(1j*(2*p -1)*I2sub))
        term = F1*np.conj(F2)
        Q = term if Q is None else Q + term
    return Q
def orientation_corr(I1, I2):
    """ match two imagery through orientation correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    I2 : np.array, size=(m,n), ndim={2,3}
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n), dtype=complex
        cross-spectrum, built from unit-amplitude spectra

    See Also
    --------
    phase_corr, windrose_corr

    References
    ----------
    .. [1] Fitch et al. "Orientation correlation", Proceeding of the Britisch
       machine vison conference, pp. 1--10, 2002.
    .. [2] <NAME>. "Evaluation of existing image matching methods for
       deriving glacier surface displacements globally from optical satellite
       imagery", Remote sensing of environment, vol. 118 pp. 339-355, 2012.

    Example
    -------
    >>> import numpy as np
    >>> from .matching_tools import get_integer_peak_location
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = orientation_corr(im1, im2)
    >>> C = np.fft.ifft2(Q)
    >>> di,dj,_,_ = get_integer_peak_location(C)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(tj, dj, atol=1))
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    I1sub,I2sub = make_templates_same_size(I1,I2)

    def _unit_spectrum(I):
        # Fourier transform reduced to unit amplitude
        return normalize_power_spectrum(np.fft.fft2(I))

    if (I1.ndim==3) or (I2.ndim==3): # multi-spectral frequency stacking
        bands = I1.shape[2]
        Q = np.zeros(I1sub.shape[:2], dtype=complex)
        for i in range(bands): # running average over all bands
            Q_b = _unit_spectrum(I1sub[:,:,i]) * \
                np.conj(_unit_spectrum(I2sub[:,:,i]))
            Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        Q = _unit_spectrum(I1sub) * np.conj(_unit_spectrum(I2sub))
    return Q
def windrose_corr(I1, I2):
    """ match two imagery through windrose phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    I2 : np.array, size=(m,n), ndim={2,3}
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n), dtype=complex
        cross-spectrum of the sign-binarized spectra

    See Also
    --------
    orientation_corr, phase_only_corr

    References
    ----------
    .. [1] Kumar & Juday, "Design of phase-only, binary phase-only, and complex
       ternary matched filters with increased signal-to-noise ratios for
       colored noise", Optics letters, vol. 16(13) pp. 1025--1027, 1991.

    Example
    -------
    >>> import numpy as np
    >>> from .matching_tools import get_integer_peak_location
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = windrose_corr(im1, im2)
    >>> C = np.fft.ifft2(Q)
    >>> di,dj,_,_ = get_integer_peak_location(C)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(tj, dj, atol=1))
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    if (I1.ndim==3) or (I2.ndim==3): # multi-spectral frequency stacking
        I1sub,I2sub = make_templates_same_size(I1,I2)
        bands = I1.shape[2]
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            # binarize the spectra, consistent with the single-band branch
            # (the sign operator was previously missing here)
            S1 = np.sign(np.fft.fft2(I1bnd))
            S2 = np.sign(np.fft.fft2(I2bnd))
            if i == 0:
                Q = (S1)*np.conj(S2)
            else:
                # running average of the per-band cross-spectra
                Q_b = (S1)*np.conj(S2)
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.sign(np.fft.fft2(I1sub)), np.sign(np.fft.fft2(I2sub))
        Q = (S1)*np.conj(S2)
    return Q
def phase_corr(I1, I2):
    """ match two imagery through phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    I2 : np.array, size=(m,n), ndim={2,3}
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n), dtype=complex
        normalized cross-spectrum

    See Also
    --------
    orientation_corr, cross_corr

    References
    ----------
    .. [1] Kuglin & Hines. "The phase correlation image alignment method",
       proceedings of the IEEE international conference on cybernetics and
       society, pp. 163-165, 1975.

    Example
    -------
    >>> import numpy as np
    >>> from .matching_tools import get_integer_peak_location
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = phase_corr(im1, im2)
    >>> C = np.fft.ifft2(Q)
    >>> di,dj,_,_ = get_integer_peak_location(C)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(tj, dj, atol=1))
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    if (I1.ndim==3) or (I2.ndim==3): # multi-spectral frequency stacking
        I1sub,I2sub = make_templates_same_size(I1,I2)
        bands = I1.shape[2]
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            if i == 0:
                Q = (S1)*np.conj(S2)
                Q = normalize_power_spectrum(Q)
            else:
                Q_b = (S1)*np.conj(S2)
                # normalize the band's OWN cross-spectrum before averaging
                # (previously divided by |Q| of the running average)
                Q_b = np.divide(Q_b, np.abs(Q_b), \
                                out=np.zeros_like(Q_b), where=Q_b!=0)
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        Q = (S1)*np.conj(S2)
        Q = normalize_power_spectrum(Q)
    return Q
def gaussian_transformed_phase_corr(I1, I2):
    """ match two imagery through Gaussian transformed phase correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    I2 : np.array, size=(m,n), ndim={2,3}
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n), dtype=complex
        Gaussian-weighted, normalized cross-spectrum

    See Also
    --------
    phase_corr

    References
    ----------
    .. [1] Eckstein et al. "Phase correlation processing for DPIV
       measurements", Experiments in fluids, vol.45 pp.485-500, 2008.

    Example
    -------
    >>> import numpy as np
    >>> from .matching_tools import get_integer_peak_location
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = gaussian_transformed_phase_corr(im1, im2)
    >>> C = np.fft.ifft2(Q)
    >>> di,dj,_,_ = get_integer_peak_location(C)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(tj, dj, atol=1))
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    if (I1.ndim==3) or (I2.ndim==3): # multi-spectral frequency stacking
        I1sub,I2sub = make_templates_same_size(I1,I2)
        bands = I1.shape[2]
        for i in range(bands): # loop through all bands
            I1bnd, I2bnd = I1sub[:,:,i], I2sub[:,:,i]
            S1, S2 = np.fft.fft2(I1bnd), np.fft.fft2(I2bnd)
            if i == 0:
                Q = (S1)*np.conj(S2)
                Q = normalize_power_spectrum(Q)
                # Gaussian low-pass weighting, reused for all bands
                M = gaussian_mask(S1)
                Q = np.multiply(M, Q)
            else:
                Q_b = (S1)*np.conj(S2)
                # normalize the band's OWN cross-spectrum before averaging
                # (previously divided by |Q| of the running average)
                Q_b = np.divide(Q_b, np.abs(Q_b),\
                                out=np.zeros_like(Q_b), where=Q_b!=0)
                Q_b = np.multiply(M, Q_b)
                Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        I1sub,I2sub = make_templates_same_size(I1,I2)
        S1, S2 = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
        Q = (S1)*np.conj(S2)
        Q = normalize_power_spectrum(Q)
        M = gaussian_mask(Q)
        Q = np.multiply(M, Q)
    return Q
def upsampled_cross_corr(S1, S2, upsampling=2):
    """ apply cros correlation, and upsample the correlation peak

    Parameters
    ----------
    S1 : np.array, size=(m,n), dtype=complex, ndim=2
        array with intensities
    S2 : np.array, size=(m,n), dtype=complex, ndim=2
        array with intensities
    upsampling : integer, default=2
        upsampling factor for the local refinement around the peak

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    pad_dft, upsample_dft

    References
    ----------
    .. [1] Guizar-Sicairo, et al. "Efficient subpixel image registration
       algorithms", Applied optics, vol. 33 pp.156--158, 2008.

    Example
    -------
    >>> import numpy as np
    >>> from .matching_tools import get_integer_peak_location
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> di,dj = upsampled_cross_corr(im1, im2)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(ti, di, atol=1))
    """
    assert type(S1)==np.ndarray, ('please provide an array')
    assert type(S2)==np.ndarray, ('please provide an array')

    (m,n) = S1.shape
    # pad the spectra to twice the size before correlating
    S1,S2 = pad_dft(S1, 2*m, 2*n), pad_dft(S2, 2*m, 2*n)

    # Q = S1*conj(S2)
    # normalized cross-spectrum of the padded transforms
    Q = normalize_power_spectrum(S1)*np.conj(normalize_power_spectrum(S2))
#    Q = normalize_power_spectrum(Q)
    C = np.real(np.fft.ifft2(Q))

    # integer peak; NOTE(review): Fortran-order unravel followed by a
    # reversal — presumably to obtain (row, column) indexing; confirm
    ij = np.unravel_index(np.argmax(C), C.shape, order='F')
    di, dj = ij[::-1]

    # transform to shifted fourier coordinate frame (being twice as big)
    i_F = np.fft.fftshift(np.arange(-np.fix(m),m))
    j_F = np.fft.fftshift(np.arange(-np.fix(n),n))

    # halve, since the correlation was done on a 2x padded grid
    i_offset, j_offset = i_F[di]/2, j_F[dj]/2

    if upsampling >2:
        # refine with a matrix-multiplied DFT in a small window
        # around the integer estimate, as in [1]
        i_shift = 1 + np.round(i_offset*upsampling)/upsampling
        j_shift = 1 + np.round(j_offset*upsampling)/upsampling

        F_shift = np.fix(np.ceil(1.5*upsampling)/2)
        CC = np.conj(upsample_dft(Q,\
                                  up_m=np.ceil(upsampling*1.5),\
                                  up_n=np.ceil(upsampling*1.5),\
                                  upsampling=upsampling,\
                                  i_offset=F_shift-(i_shift*upsampling),\
                                  j_offset=F_shift-(j_shift*upsampling)))
        ij = np.unravel_index(np.argmax(CC), CC.shape, order='F')
        ddi, ddj = ij[::-1]
        # re-centre the window peak and convert back to pixel units
        ddi -= (F_shift )
        ddj -= (F_shift )

        i_offset += ddi/upsampling
        j_offset += ddj/upsampling
    return i_offset,j_offset
def cross_corr(I1, I2):
    """ match two imagery through cross correlation in FFT

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    I2 : np.array, size=(m,n), ndim={2,3}
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n), dtype=complex
        cross-spectrum

    See Also
    --------
    phase_corr

    References
    ----------
    .. [1] <NAME>. "Evaluation of existing image matching methods for
       deriving glacier surface displacements globally from optical satellite
       imagery", Remote sensing of environment, vol. 118 pp. 339-355, 2012.

    Example
    -------
    >>> import numpy as np
    >>> from .matching_tools import get_integer_peak_location
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = cross_corr(im1, im2)
    >>> C = np.fft.ifft2(Q)
    >>> di,dj,_,_ = get_integer_peak_location(C)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(tj, dj, atol=1))
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    I1sub,I2sub = make_templates_same_size(I1,I2)

    def _cross_spectrum(Ia, Ib):
        return np.fft.fft2(Ia) * np.conj(np.fft.fft2(Ib))

    if (I1.ndim==3) or (I2.ndim==3): # multi-spectral frequency stacking
        bands = I1.shape[2]
        Q = 0
        for i in range(bands): # running average over all bands
            Q_b = _cross_spectrum(I1sub[:,:,i], I2sub[:,:,i])
            Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        Q = _cross_spectrum(I1sub, I2sub)
    return Q
def binary_orientation_corr(I1, I2):
    """ match two imagery through binary phase only correlation

    Parameters
    ----------
    I1 : np.array, size=(m,n), ndim={2,3}
        array with intensities
    I2 : np.array, size=(m,n), ndim={2,3}
        array with intensities

    Returns
    -------
    Q : np.array, size=(m,n), dtype=complex
        cross-spectrum

    See Also
    --------
    orientation_corr, phase_only_corr

    References
    ----------
    .. [1] Kumar & Juday, "Design of phase-only, binary phase-only, and complex
       ternary matched filters with increased signal-to-noise ratios for
       colored noise", Optics letters, vol. 16(13) pp. 1025--1027, 1991.

    Example
    -------
    >>> import numpy as np
    >>> from .matching_tools import get_integer_peak_location
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = binary_orientation_corr(im1, im2)
    >>> C = np.fft.ifft2(Q)
    >>> di,dj,_,_ = get_integer_peak_location(C)

    >>> assert(np.isclose(ti, di, atol=1))
    >>> assert(np.isclose(tj, dj, atol=1))
    """
    assert type(I1)==np.ndarray, ('please provide an array')
    assert type(I2)==np.ndarray, ('please provide an array')

    I1sub,I2sub = make_templates_same_size(I1,I2)

    def _binary_weighted_cross(Ia, Ib):
        Sa, Sb = np.fft.fft2(Ia), np.fft.fft2(Ib)
        W = np.sign(np.real(Sb))  # binarize the reference spectrum
        return Sa * np.conj(W*Sb)

    if (I1.ndim==3) or (I2.ndim==3): # multi-spectral frequency stacking
        bands = I1.shape[2]
        Q = 0
        for i in range(bands): # running average over all bands
            Q_b = _binary_weighted_cross(I1sub[:,:,i], I2sub[:,:,i])
            Q = (1/(i+1))*Q_b + (i/(i+1))*Q
    else:
        Q = _binary_weighted_cross(I1sub, I2sub)
    return Q
def masked_corr(I1, I2, M1=np.array(()), M2=np.array(())):
""" match two imagery through masked normalized cross-correlation in FFT
Parameters
----------
I1 : np.array, size=(m,n), ndim=2
array with intensities
I2 : np.array, size=(m,n), ndim=2
array with intensities
M1 : np.array, size=(m,n)
array with mask
M2 : np.array, size=(m,n)
array with mask
Returns
-------
NCC : np.array, size=(m,n)
correlation surface
References
----------
.. [1] Padfield. "Masked object registration in the Fourier domain",
IEEE transactions on image processing, vol. 21(5) pp. 2706-2718, 2011.
Example
-------
>>> import numpy as np
>>> from .matching_tools import get_integer_peak_location
>>> from ..generic.test_tools import create_sample_image_pair
>>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
>>> msk1,msk2 = np.ones_like(im1), np.ones_like(im2)
>>> Q = masked_corr(im1, im2, msk1, msk2)
>>> C = np.fft.ifft2(Q)
>>> di,dj,_,_ = get_integer_peak_location(C)
>>> assert(np.isclose(ti, di, atol=1))
>>> assert(np.isclose(ti, di, atol=1))
"""
assert type(I1)==np.ndarray, ('please provide an array')
assert type(I2)==np.ndarray, ('please provide an array')
assert type(M1)==np.ndarray, ('please provide an array')
assert type(M2)==np.ndarray, ('please provide an array')
# init
I1sub,I2sub = make_templates_same_size(I1,I2)
if M1.size==0 : M1 = np.ones_like(I1sub)
if M2.size==0 : M2 = np.ones_like(I2sub)
M1sub,M2sub = make_templates_same_size(M1,M2)
# preparation
I1f, I2f = np.fft.fft2(I1sub), np.fft.fft2(I2sub)
M1f, M2f = np.fft.fft2(M1sub), np.fft.fft2(M2sub)
fF1F2 = np.fft.ifft2( I1f* | np.conj(I2f) | numpy.conj |
import gym
import logging
logging.basicConfig(level=logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
import numpy as np
import sys
sys.path.append(r"code\Reforcement-Learning\DQN-CartPole")
from agent import Agent
from model import MyModel
from algorithm import DQN
from replay_memory import ReplayMemory
LEARN_FREQ = 5 # training frequency: don't learn on every step; accumulate some experience first, then learn, for efficiency
MEMORY_SIZE = 200000 # replay memory capacity; larger values use more RAM
MEMORY_WARMUP_SIZE = 200 # pre-fill the replay memory with this many experiences before sampling batches for the agent to learn from
BATCH_SIZE = 64 # batch size
LEARNING_RATE = 0.0005 # learning rate
GAMMA = 0.99 # reward discount factor, typically in the 0.9~0.999 range
def run_train_episode(agent,env,rpm):
"""
参数:
agent - 智能体
env - 环境
rpm - replay memory
返回:
total_reward - 总奖励
"""
total_reward = 0
obs = env.reset() # 初始化环境
step = 0
while True:
step += 1
obs = np.expand_dims(obs,axis=0) # 拓展一个维度
action = agent.sample(obs) # 尝试一个动作
obs = | np.squeeze(obs) | numpy.squeeze |
from os import environ, remove
from tempfile import NamedTemporaryFile, mktemp
from unittest import TestCase, main
from numpy import (
arange,
array,
e,
greater_equal,
less_equal,
log,
nan,
sqrt,
zeros,
)
from cogent3 import (
DNA,
PROTEIN,
RNA,
load_aligned_seqs,
make_aligned_seqs,
make_tree,
)
from cogent3.core.alignment import ArrayAlignment
from cogent3.core.alphabet import CharAlphabet
from cogent3.evolve.coevolution import (
DEFAULT_NULL_VALUE,
AAGapless,
aln_position_pairs_cmp_threshold,
aln_position_pairs_ge_threshold,
aln_position_pairs_le_threshold,
ancestral_state_alignment,
ancestral_state_pair,
ancestral_state_position,
ancestral_states_input_validation,
build_coevolution_matrix_filepath,
calc_pair_scale,
coevolution_matrix_to_csv,
coevolve_alignment,
coevolve_alignments,
coevolve_alignments_validation,
coevolve_pair,
coevolve_position,
count_ge_threshold,
count_le_threshold,
csv_to_coevolution_matrix,
filter_exclude_positions,
filter_non_parsimony_informative,
filter_threshold_based_multiple_interdependency,
freqs_from_aln,
freqs_to_array,
get_allowed_perturbations,
get_ancestral_seqs,
get_dg,
get_dgg,
get_positional_frequencies,
get_positional_probabilities,
get_subalignments,
identify_aln_positions_above_threshold,
ignore_excludes,
is_parsimony_informative,
join_positions,
ltm_to_symmetric,
make_weights,
merge_alignments,
mi,
mi_alignment,
mi_pair,
mi_position,
n_random_seqs,
nmi,
nmi_alignment,
nmi_pair,
nmi_position,
normalized_mi,
parse_coevolution_matrix_filepath,
pickle_coevolution_result,
probs_from_dict,
protein_dict,
resampled_mi_alignment,
sca_alignment,
sca_input_validation,
sca_pair,
sca_position,
unpickle_coevolution_result,
validate_alignment,
validate_alphabet,
validate_ancestral_seqs,
validate_position,
validate_tree,
)
from cogent3.maths.stats.number import CategoryCounter
__author__ = "<NAME>"
__copyright__ = "Copyright 2007-2022, The Cogent Project"
__credits__ = ["<NAME>"]
__license__ = "BSD-3"
__version__ = "2022.4.20a1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Beta"
from numpy.testing import assert_allclose, assert_equal
class CoevolutionTests(TestCase):
"""Tests of coevolution.py"""
    def setUp(self):
        """Set up variables for use in tests"""
        # opt-in flag for long-running tests (read from the environment)
        self.run_slow_tests = int(environ.get("TEST_SLOW_APPC", 0))
        # Data used in SCA tests
        self.dna_aln = ArrayAlignment(
            data=list(zip(list(range(4)), ["ACGT", "AGCT", "ACCC", "TAGG"])),
            moltype=DNA,
        )
        self.rna_aln = ArrayAlignment(
            data=list(zip(list(range(4)), ["ACGU", "AGCU", "ACCC", "UAGG"])),
            moltype=RNA,
        )
        self.protein_aln = ArrayAlignment(
            data=list(zip(list(range(4)), ["ACGP", "AGCT", "ACCC", "TAGG"])),
            moltype=PROTEIN,
        )
        self.dna_aln_gapped = ArrayAlignment(
            data=list(zip(list(range(4)), ["A-CGT", "AGC-T", "-ACCC", "TAGG-"])),
            moltype=DNA,
        )
        # 20-sequence, 3-column protein alignment (with gaps) used for
        # frequency-based calculations
        self.freq = ArrayAlignment(
            data=list(
                zip(
                    list(range(20)),
                    [
                        "TCT",
                        "CCT",
                        "CCC",
                        "CCC",
                        "CCG",
                        "CC-",
                        "AC-",
                        "AC-",
                        "AA-",
                        "AA-",
                        "GA-",
                        "GA-",
                        "GA-",
                        "GA-",
                        "GA-",
                        "G--",
                        "G--",
                        "G--",
                        "G--",
                        "G--",
                    ],
                )
            ),
            moltype=PROTEIN,
        )
        # 20-sequence, 2-column gapless alignment for pairwise statistics
        self.two_pos = ArrayAlignment(
            data=list(
                zip(
                    list(map(str, list(range(20)))),
                    [
                        "TC",
                        "CC",
                        "CC",
                        "CC",
                        "CC",
                        "CC",
                        "AC",
                        "AC",
                        "AA",
                        "AA",
                        "GA",
                        "GA",
                        "GA",
                        "GA",
                        "GA",
                        "GT",
                        "GT",
                        "GT",
                        "GT",
                        "GT",
                    ],
                )
            ),
            moltype=PROTEIN,
        )
        # NOTE(review): tree20_string, gpcr_aln and myos_aln are module-level
        # fixtures defined elsewhere in this test module -- confirm they are
        # present before running.
        self.tree20 = make_tree(treestring=tree20_string)
        self.gpcr_aln = gpcr_aln
        self.myos_aln = myos_aln
        # a made-up dict of base frequencies to use as the natural freqs
        # for SCA calcs on DNA seqs
        self.dna_base_freqs = dict(list(zip("ACGT", [0.25] * 4)))
        self.rna_base_freqs = dict(list(zip("ACGU", [0.25] * 4)))
        # small 4-sequence alignments (one per moltype) plus a matching tree,
        # used by the per-moltype alignment analysis tests
        self.protein_aln4 = ArrayAlignment(
            [("A1", "AACF"), ("A12", "AADF"), ("A123", "ADCF"), ("A111", "AAD-")],
            moltype=PROTEIN,
        )
        self.rna_aln4 = ArrayAlignment(
            [("A1", "AAUU"), ("A12", "ACGU"), ("A123", "UUAA"), ("A111", "AAA-")],
            moltype=RNA,
        )
        self.dna_aln4 = ArrayAlignment(
            [("A1", "AATT"), ("A12", "ACGT"), ("A123", "TTAA"), ("A111", "AAA?")],
            moltype=DNA,
        )
        self.tree4 = make_tree(
            treestring="((A1:0.5,A111:0.5):0.5,(A12:0.5,A123:0.5):0.5);"
        )
def test_alignment_analyses_moltype_protein(self):
"""alignment methods work with moltype = PROTEIN"""
r = mi_alignment(self.protein_aln4)
self.assertEqual(r.shape, (4, 4))
r = nmi_alignment(self.protein_aln4)
self.assertEqual(r.shape, (4, 4))
r = sca_alignment(self.protein_aln4, cutoff=0.75)
self.assertEqual(r.shape, (4, 4))
r = ancestral_state_alignment(self.protein_aln4, self.tree4)
self.assertEqual(r.shape, (4, 4))
def test_alignment_analyses_moltype_rna(self):
"""alignment methods work with moltype = RNA"""
r = mi_alignment(self.rna_aln4)
self.assertEqual(r.shape, (4, 4))
r = nmi_alignment(self.rna_aln4)
self.assertEqual(r.shape, (4, 4))
r = sca_alignment(
self.rna_aln4,
cutoff=0.75,
alphabet="ACGU",
background_freqs=self.rna_base_freqs,
)
self.assertEqual(r.shape, (4, 4))
r = ancestral_state_alignment(self.rna_aln4, self.tree4)
self.assertEqual(r.shape, (4, 4))
def test_alignment_analyses_moltype_dna(self):
"""alignment methods work with moltype = DNA"""
r = mi_alignment(self.dna_aln4)
self.assertEqual(r.shape, (4, 4))
r = nmi_alignment(self.dna_aln4)
self.assertEqual(r.shape, (4, 4))
r = sca_alignment(
self.dna_aln4,
cutoff=0.75,
alphabet="ACGT",
background_freqs=self.dna_base_freqs,
)
self.assertEqual(r.shape, (4, 4))
r = ancestral_state_alignment(self.dna_aln4, self.tree4)
self.assertEqual(r.shape, (4, 4))
def test_join_positions(self):
"""join_positions functions as expected"""
self.assertEqual(
join_positions(list("ABCD"), list("WXYZ")), ["AW", "BX", "CY", "DZ"]
)
self.assertEqual(join_positions(list("AAA"), list("BBB")), ["AB", "AB", "AB"])
self.assertEqual(join_positions([], []), [])
def test_mi(self):
"""mi calculations function as expected with valid data"""
assert_allclose(mi(1.0, 1.0, 1.0), 1.0)
assert_allclose(mi(1.0, 1.0, 2.0), 0.0)
assert_allclose(mi(1.0, 1.0, 1.5), 0.5)
def test_normalized_mi(self):
"""normalized mi calculations function as expected with valid data"""
assert_allclose(normalized_mi(1.0, 1.0, 1.0), 1.0)
assert_allclose(normalized_mi(1.0, 1.0, 2.0), 0.0)
assert_allclose(normalized_mi(1.0, 1.0, 1.5), 0.3333, 3)
def test_mi_pair(self):
"""mi_pair calculates mi from a pair of columns"""
aln = ArrayAlignment(data={"1": "AB", "2": "AB"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), 0.0)
aln = ArrayAlignment(data={"1": "AB", "2": "BA"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), 1.0)
# order of positions doesn't matter (when it shouldn't)
aln = ArrayAlignment(data={"1": "AB", "2": "AB"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), mi_pair(aln, pos1=1, pos2=0))
aln = ArrayAlignment(data={"1": "AB", "2": "BA"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), mi_pair(aln, pos1=1, pos2=0))
def test_wrapper_functions_handle_invalid_parameters(self):
"""coevolve_*: functions error on missing parameters"""
# missing cutoff
aln = ArrayAlignment(data={"1": "AC", "2": "AC"}, moltype=PROTEIN)
self.assertRaises(ValueError, coevolve_pair, sca_pair, aln, 0, 1)
self.assertRaises(ValueError, coevolve_position, sca_position, aln, 0)
self.assertRaises(ValueError, coevolve_alignment, sca_alignment, aln)
self.assertRaises(ValueError, coevolve_alignments, sca_alignment, aln, aln)
def test_coevolve_pair(self):
"""coevolve_pair: returns same as pair methods called directly"""
aln = ArrayAlignment(data={"1": "AC", "2": "AC"}, moltype=PROTEIN)
t = make_tree(treestring="(1:0.5,2:0.5);")
cutoff = 0.50
# mi_pair == coevolve_pair(mi_pair,...)
assert_allclose(
coevolve_pair(mi_pair, aln, pos1=0, pos2=1), mi_pair(aln, pos1=0, pos2=1)
)
assert_allclose(
coevolve_pair(nmi_pair, aln, pos1=0, pos2=1), nmi_pair(aln, pos1=0, pos2=1)
)
assert_allclose(
coevolve_pair(ancestral_state_pair, aln, pos1=0, pos2=1, tree=t),
ancestral_state_pair(aln, pos1=0, pos2=1, tree=t),
)
assert_allclose(
coevolve_pair(sca_pair, aln, pos1=0, pos2=1, cutoff=cutoff),
sca_pair(aln, pos1=0, pos2=1, cutoff=cutoff),
)
def test_coevolve_position(self):
"""coevolve_position: returns same as position methods called directly"""
aln = ArrayAlignment(data={"1": "AC", "2": "AC"}, moltype=PROTEIN)
t = make_tree(treestring="(1:0.5,2:0.5);")
cutoff = 0.50
# mi_position == coevolve_position(mi_position,...)
assert_allclose(
coevolve_position(mi_position, aln, position=0),
mi_position(aln, position=0),
)
assert_allclose(
coevolve_position(nmi_position, aln, position=0),
nmi_position(aln, position=0),
)
assert_allclose(
coevolve_position(ancestral_state_position, aln, position=0, tree=t),
ancestral_state_position(aln, position=0, tree=t),
)
assert_allclose(
coevolve_position(sca_position, aln, position=0, cutoff=cutoff),
sca_position(aln, position=0, cutoff=cutoff),
)
    def test_coevolve_alignment(self):
        """coevolve_alignment: returns same as alignment methods"""
        # NOTE(review): mip_alignment and mia_alignment are not in the visible
        # import block at the top of this file -- confirm they are imported,
        # otherwise the two assertions using them raise NameError.
        aln = ArrayAlignment(data={"1": "AC", "2": "AC"}, moltype=PROTEIN)
        t = make_tree(treestring="(1:0.5,2:0.5);")
        cutoff = 0.50
        # mi_alignment == coevolve_alignment(mi_alignment,...)
        assert_allclose(coevolve_alignment(mi_alignment, aln), mi_alignment(aln))
        assert_allclose(coevolve_alignment(mip_alignment, aln), mip_alignment(aln))
        assert_allclose(coevolve_alignment(mia_alignment, aln), mia_alignment(aln))
        assert_allclose(coevolve_alignment(nmi_alignment, aln), nmi_alignment(aln))
        assert_allclose(
            coevolve_alignment(ancestral_state_alignment, aln, tree=t),
            ancestral_state_alignment(aln, tree=t),
        )
        assert_allclose(
            coevolve_alignment(sca_alignment, aln, cutoff=cutoff),
            sca_alignment(aln, cutoff=cutoff),
        )
    def test_coevolve_alignments_validation_idenifiers(self):
        """coevolve_alignments_validation: seq/tree validation functions

        NOTE(review): the method name misspells "identifiers"; kept as-is
        because renaming would change test discovery output.
        """
        method = sca_alignment
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
        aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
        t = make_tree(treestring="(1:0.5,2:0.5);")
        # OK w/ no tree
        coevolve_alignments_validation(method, aln1, aln2, 2, None)
        # OK w/ tree
        coevolve_alignments_validation(method, aln1, aln2, 2, None, tree=t)
        # If there is a plus present in identifiers, we only care about the
        # text before it (NOTE(review): the original comment said "colon",
        # but the data below uses "+" separators -- confirm the intended
        # delimiter)
        aln1 = ArrayAlignment(data={"1+a": "AC", "2+b": "AD"}, moltype=PROTEIN)
        aln2 = ArrayAlignment(data={"1 + c": "EFW", "2 + d": "EGY"}, moltype=PROTEIN)
        t = make_tree(treestring="(1+e:0.5,2 + f:0.5);")
        # OK w/ no tree
        coevolve_alignments_validation(method, aln1, aln2, 2, None)
        # OK w/ tree
        coevolve_alignments_validation(method, aln1, aln2, 2, None, tree=t)
        # mismatch b/w alignments seq names
        aln1 = ArrayAlignment(data={"3": "AC", "2": "AD"}, moltype=PROTEIN)
        aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
        t = make_tree(treestring="(1:0.5,2:0.5);")
        self.assertRaises(
            AssertionError,
            coevolve_alignments_validation,
            method,
            aln1,
            aln2,
            2,
            None,
            tree=t,
        )
        # mismatch b/w alignments and tree seq names
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
        aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
        t = make_tree(treestring="(3:0.5,2:0.5);")
        self.assertRaises(
            AssertionError,
            coevolve_alignments_validation,
            method,
            aln1,
            aln2,
            2,
            None,
            tree=t,
        )
        # mismatch b/w alignments in number of seqs
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AD", "3": "AA"}, moltype=PROTEIN)
        aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
        t = make_tree(treestring="(1:0.5,2:0.5);")
        self.assertRaises(
            AssertionError, coevolve_alignments_validation, method, aln1, aln2, 2, None
        )
        self.assertRaises(
            AssertionError,
            coevolve_alignments_validation,
            method,
            aln1,
            aln2,
            2,
            None,
            tree=t,
        )
        # mismatch b/w alignments & tree in number of seqs
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
        aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
        t = make_tree(treestring="(1:0.5,(2:0.5,3:0.25));")
        self.assertRaises(
            AssertionError,
            coevolve_alignments_validation,
            method,
            aln1,
            aln2,
            2,
            None,
            tree=t,
        )
def test_coevolve_alignments_validation_min_num_seqs(self):
"""coevolve_alignments_validation: ValueError on fewer than min_num_seqs"""
method = mi_alignment
# too few sequences -> ValueError
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
coevolve_alignments_validation(method, aln1, aln2, 1, None)
coevolve_alignments_validation(method, aln1, aln2, 2, None)
self.assertRaises(
ValueError, coevolve_alignments_validation, method, aln1, aln2, 3, None
)
def test_coevolve_alignments_validation_max_num_seqs(self):
"""coevolve_alignments_validation: min_num_seqs <= max_num_seqs"""
method = mi_alignment
# min_num_seqs > max_num_seqs-> ValueError
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
coevolve_alignments_validation(method, aln1, aln2, 1, None)
coevolve_alignments_validation(method, aln1, aln2, 1, 3)
coevolve_alignments_validation(method, aln1, aln2, 2, 3)
self.assertRaises(
ValueError, coevolve_alignments_validation, method, aln1, aln2, 3, 2
)
def test_coevolve_alignments_validation_moltypes(self):
"""coevolve_alignments_validation: valid for acceptable moltypes"""
aln1 = ArrayAlignment(data={"1": "AC", "2": "AU"}, moltype=RNA)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
# different moltype
coevolve_alignments_validation(mi_alignment, aln1, aln2, 2, None)
coevolve_alignments_validation(nmi_alignment, aln1, aln2, 2, None)
coevolve_alignments_validation(resampled_mi_alignment, aln1, aln2, 2, None)
self.assertRaises(
AssertionError,
coevolve_alignments_validation,
sca_alignment,
aln1,
aln2,
2,
None,
)
self.assertRaises(
AssertionError,
coevolve_alignments_validation,
ancestral_state_alignment,
aln1,
aln2,
2,
None,
)
def test_coevolve_alignments(self):
"""coevolve_alignments: returns correct len(aln1) x len(aln2) matrix"""
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
combined_aln = ArrayAlignment(
data={"1": "ACEFW", "2": "ADEGY"}, moltype=PROTEIN
)
t = make_tree(treestring="(1:0.5,2:0.5);")
cutoff = 0.50
# MI
m = mi_alignment(combined_aln)
expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
assert_allclose(coevolve_alignments(mi_alignment, aln1, aln2), expected)
# MI (return_full=True)
assert_allclose(
coevolve_alignments(mi_alignment, aln1, aln2, return_full=True), m
)
# NMI
m = nmi_alignment(combined_aln)
expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
assert_allclose(coevolve_alignments(nmi_alignment, aln1, aln2), expected)
# AS
m = ancestral_state_alignment(combined_aln, tree=t)
expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
assert_allclose(
coevolve_alignments(ancestral_state_alignment, aln1, aln2, tree=t), expected
)
# SCA
m = sca_alignment(combined_aln, cutoff=cutoff)
expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
assert_allclose(
coevolve_alignments(sca_alignment, aln1, aln2, cutoff=cutoff), expected
)
def test_coevolve_alignments_watches_min_num_seqs(self):
"""coevolve_alignments: error on too few sequences"""
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
coevolve_alignments(mi_alignment, aln1, aln2)
coevolve_alignments(mi_alignment, aln1, aln2, min_num_seqs=0)
coevolve_alignments(mi_alignment, aln1, aln2, min_num_seqs=1)
coevolve_alignments(mi_alignment, aln1, aln2, min_num_seqs=2)
self.assertRaises(
ValueError, coevolve_alignments, mi_alignment, aln1, aln2, min_num_seqs=3
)
self.assertRaises(
ValueError, coevolve_alignments, mi_alignment, aln1, aln2, min_num_seqs=50
)
    def test_coevolve_alignments_watches_max_num_seqs(self):
        """coevolve_alignments: filtering or error on too many sequences"""
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AD", "3": "YP"}, moltype=PROTEIN)
        aln2 = ArrayAlignment(
            data={"1": "ACP", "2": "EAD", "3": "PYP"}, moltype=PROTEIN
        )
        # keep all seqs
        # NOTE(review): taking .name from a NamedTemporaryFile and reusing the
        # path relies on the file object being garbage-collected (delete=True)
        # before the path is written again -- effectively mktemp semantics,
        # race-prone and unreliable on Windows; confirm this is acceptable.
        tmp_filepath = NamedTemporaryFile(
            prefix="tmp_test_coevolution", suffix=".fasta"
        ).name
        coevolve_alignments(
            mi_alignment, aln1, aln2, max_num_seqs=3, merged_aln_filepath=tmp_filepath
        )
        self.assertEqual(load_aligned_seqs(tmp_filepath).num_seqs, 3)
        # keep 2 seqs
        coevolve_alignments(
            mi_alignment, aln1, aln2, max_num_seqs=2, merged_aln_filepath=tmp_filepath
        )
        seqs = load_aligned_seqs(tmp_filepath)
        self.assertEqual(seqs.num_seqs, 2)
        # error if no sequence filter
        self.assertRaises(
            ValueError,
            coevolve_alignments,
            mi_alignment,
            aln1,
            aln2,
            max_num_seqs=2,
            merged_aln_filepath=tmp_filepath,
            sequence_filter=None,
        )
        # clean up the temporary file
        remove(tmp_filepath)
    def test_coevolve_alignments_different_MolType(self):
        """coevolve_alignments: different MolTypes supported"""
        aln1 = ArrayAlignment(data={"1": "AC", "2": "AU"}, moltype=RNA)
        aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
        # NOTE(review): no moltype is given here, so combined_aln uses the
        # ArrayAlignment default -- confirm that is intentional.  The local
        # tree `t` below is assigned but never used in this test.
        combined_aln = ArrayAlignment(data={"1": "ACEFW", "2": "AUEGY"})
        t = make_tree(treestring="(1:0.5,2:0.5);")
        # MI
        m = mi_alignment(combined_aln)
        # expected inter-alignment block: rows are aln2 positions (2..4),
        # columns are aln1 positions (0..1) of the merged alignment
        expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
        assert_allclose(coevolve_alignments(mi_alignment, aln1, aln2), expected)
        # MI (return_full=True)
        assert_allclose(
            coevolve_alignments(mi_alignment, aln1, aln2, return_full=True), m
        )
        # NMI
        m = nmi_alignment(combined_aln)
        expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
        assert_allclose(coevolve_alignments(nmi_alignment, aln1, aln2), expected)
def test_mi_pair_cols_default_exclude_handling(self):
"""mi_pair returns null_value on excluded by default"""
aln = ArrayAlignment(data={"1": "AB", "2": "-B"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), DEFAULT_NULL_VALUE)
aln = ArrayAlignment(data={"1": "-B", "2": "-B"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), DEFAULT_NULL_VALUE)
aln = ArrayAlignment(data={"1": "AA", "2": "-B"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), DEFAULT_NULL_VALUE)
aln = ArrayAlignment(data={"1": "AA", "2": "PB"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1, excludes="P"), DEFAULT_NULL_VALUE)
def test_mi_pair_cols_non_default_exclude_handling(self):
"""mi_pair uses non-default exclude_handler when provided"""
aln = ArrayAlignment(data={"1": "A-", "2": "A-"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), DEFAULT_NULL_VALUE)
assert_allclose(
mi_pair(aln, pos1=0, pos2=1, exclude_handler=ignore_excludes), 0.0
)
    def test_mi_pair_cols_and_entropies(self):
        """mi_pair calculates mi from a pair of columns and precalc entropies"""
        aln = ArrayAlignment(data={"1": "AB", "2": "AB"}, moltype=PROTEIN)
        assert_allclose(mi_pair(aln, pos1=0, pos2=1, h1=0.0, h2=0.0), 0.0)
        aln = ArrayAlignment(data={"1": "AB", "2": "BA"}, moltype=PROTEIN)
        assert_allclose(mi_pair(aln, pos1=0, pos2=1, h1=1.0, h2=1.0), 1.0)
        # incorrect positional entropies provided to ensure that the
        # precalculated values are used, and that entropies are not
        # calculated on-the-fly
        aln = ArrayAlignment(data={"1": "AB", "2": "AB"}, moltype=PROTEIN)
        assert_allclose(mi_pair(aln, pos1=0, pos2=1, h1=1.0, h2=1.0), 2.0)
def test_mi_pair_alt_calculator(self):
"""mi_pair uses alternate mi_calculator when provided"""
aln = ArrayAlignment(data={"1": "AB", "2": "AB"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), 0.0)
assert_allclose(
mi_pair(aln, pos1=0, pos2=1, mi_calculator=normalized_mi),
DEFAULT_NULL_VALUE,
)
def test_mi_position_valid_input(self):
"""mi_position functions with varied valid input"""
aln = ArrayAlignment(data={"1": "ACG", "2": "GAC"}, moltype=PROTEIN)
assert_allclose(mi_position(aln, 0), array([1.0, 1.0, 1.0]))
aln = ArrayAlignment(data={"1": "ACG", "2": "ACG"}, moltype=PROTEIN)
assert_allclose(mi_position(aln, 0), array([0.0, 0.0, 0.0]))
aln = ArrayAlignment(data={"1": "ACG", "2": "ACG"}, moltype=PROTEIN)
assert_allclose(mi_position(aln, 2), array([0.0, 0.0, 0.0]))
def test_mi_position_from_alignment_nmi(self):
"""mi_position functions w/ alternate mi_calculator"""
aln = ArrayAlignment(data={"1": "ACG", "2": "ACG"}, moltype=PROTEIN)
assert_allclose(mi_position(aln, 0), array([0.0, 0.0, 0.0]))
aln = ArrayAlignment(data={"1": "ACG", "2": "ACG"}, moltype=PROTEIN)
assert_allclose(
mi_position(aln, 0, mi_calculator=normalized_mi),
array([DEFAULT_NULL_VALUE, DEFAULT_NULL_VALUE, DEFAULT_NULL_VALUE]),
)
def test_mi_position_from_alignment_default_exclude_handling(self):
"""mi_position handles excludes by setting to null_value"""
aln = ArrayAlignment(data={"1": "ACG", "2": "G-C"}, moltype=PROTEIN)
assert_allclose(mi_position(aln, 0), array([1.0, DEFAULT_NULL_VALUE, 1.0]))
aln = ArrayAlignment(data={"1": "ACG", "2": "GPC"}, moltype=PROTEIN)
assert_allclose(
mi_position(aln, 0, excludes="P"), | array([1.0, DEFAULT_NULL_VALUE, 1.0]) | numpy.array |
"""
Created on June 21, 2018
@author: Moritz
"""
import numpy as np
from spn.experiments.AQP.leaves.static.StaticNumeric import StaticNumeric
def identity_likelihood(node, data, dtype=np.float64):
assert len(node.scope) == 1, node.scope
probs = np.zeros((data.shape[0], 1), dtype=dtype)
nd = data[:, node.scope[0]]
for i, val in enumerate(nd):
if np.isnan(val):
probs[i] = 1
else:
lower = np.searchsorted(node.vals, val, side="left")
higher = | np.searchsorted(node.vals, val, side="right") | numpy.searchsorted |
# -*- coding: utf-8 -*-
"""
@author: jzh
"""
import numpy as np, keras.backend as K
import tensorflow as tf
from keras.optimizers import Adam
from keras.layers import Input
from keras.models import Model
from src.VAE import get_gcn, get_gcn_vae_id, get_gcn_vae_exp
from src.data_utils import normalize_fromfile, denormalize_fromfile, data_recover, batch_change
from src.get_mesh import get_mesh
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh, ArpackNoConvergence
from src.mesh import V2M2
# Template mesh used as the topology reference when exporting reconstructed faces.
ref_name = 'data/disentangle/Mean_Face.obj'
'''
GCN code was inspired by https://github.com/tkipf/keras-gcn
'''
def get_general_laplacian(adj):
    """Return the generalized Laplacian (D - A) * D^-1 of adjacency ``adj``.

    ``adj`` is a scipy sparse matrix; D is the diagonal row-degree matrix.
    """
    degrees = np.array(adj.sum(1)).flatten()
    degree_mat = sp.diags(degrees, 0)
    inv_degree_mat = sp.diags(np.power(degrees, -1), 0)
    return (degree_mat - adj) * inv_degree_mat
def normalize_adj(adj, symmetric=True):
    """Degree-normalize a sparse adjacency matrix.

    symmetric=True computes D^-1/2 A^T D^-1/2 (equal to D^-1/2 A D^-1/2 when
    A is symmetric); symmetric=False computes the random-walk form D^-1 A.
    Zero-degree rows yield inf entries -- callers are expected to add
    self-loops first (see preprocess_adj).
    """
    degrees = np.array(adj.sum(1))
    if symmetric:
        d_inv_sqrt = sp.diags(np.power(degrees, -0.5).flatten(), 0)
        return adj.dot(d_inv_sqrt).transpose().dot(d_inv_sqrt).tocsr()
    d_inv = sp.diags(np.power(degrees, -1).flatten(), 0)
    return d_inv.dot(adj).tocsr()
def normalized_laplacian(adj, symmetric=True):
    """Return the normalized graph Laplacian I - A_norm of adjacency ``adj``."""
    identity = sp.eye(adj.shape[0], dtype=np.float32)
    return identity - normalize_adj(adj, symmetric)
def preprocess_adj(adj, symmetric=True):
    """Add self-loops to ``adj`` and degree-normalize the result.

    The self-loops guarantee non-zero degrees before normalization.
    """
    with_self_loops = adj + sp.eye(adj.shape[0])
    return normalize_adj(with_self_loops, symmetric)
def rescale_laplacian(laplacian):
    """Linearly rescale a Laplacian so its spectrum lies in [-1, 1].

    Computes (2 / lambda_max) * L - I.  If the ARPACK eigensolver fails to
    converge, lambda_max = 2 is used (the upper bound for a normalized
    Laplacian).
    """
    try:
        print('Calculating largest eigenvalue of normalized graph Laplacian...')
        largest_eigval = eigsh(laplacian, 1, which='LM', return_eigenvectors=False)[0]
    except ArpackNoConvergence:
        print('Eigenvalue calculation did not converge! Using largest_eigval=2 instead.')
        largest_eigval = 2
    return (2.0 / largest_eigval) * laplacian - sp.eye(laplacian.shape[0])
def chebyshev_polynomial(X, k):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices."""
    print(('Calculating Chebyshev polynomials up to order {}...').format(k))
    # T_0 = I, T_1 = X, then T_n = 2 X T_{n-1} - T_{n-2}
    polynomials = [sp.eye(X.shape[0]).tocsr(), X]
    for _ in range(2, k + 1):
        X_copy = sp.csr_matrix(X, copy=True)
        polynomials.append(2 * X_copy.dot(polynomials[-1]) - polynomials[-2])
    return [t.astype(np.float32) for t in polynomials]
class gcn_dis_model(object):
    """Base class for the disentangled GCN face VAE models.

    Stores the shared hyper-parameters and the per-dimension normalization
    bounds, then delegates graph construction to ``build_model``, which each
    subclass implements.
    """
    def __init__(self, input_dim, prefix, suffix, lr, load, feature_dim=9, latent_dim_id=50, latent_dim_exp=25, kl_weight=0.000005,weight_decay = 0.00001, batch_size=1, MAX_DEGREE=2):
        self.input_dim = input_dim
        self.prefix = prefix      # dataset folder name under data/
        self.suffix = suffix      # tag appended to saved weight filenames
        self.load = load          # load previously saved weights after build
        self.latent_dim_id = latent_dim_id
        self.latent_dim_exp = latent_dim_exp
        self.feature_dim = feature_dim
        # number of mesh vertices: total features / per-vertex feature size
        self.v = int(input_dim / feature_dim)
        self.hidden_dim = 300
        self.lr = lr
        # KL weight kept as a backend variable so it can be annealed
        self.kl_weight = K.variable(kl_weight)
        # per-dimension max/min used for (de)normalization of mesh features
        self.M_list = np.load(('data/{}/max_data.npy').format(self.prefix))
        self.m_list = np.load(('data/{}/min_data.npy').format(self.prefix))
        self.batch_size = batch_size
        self.weight_decay = K.variable(weight_decay)
        # build_model is provided by subclasses (e.g. disentangle_model_vae_id)
        self.build_model(MAX_DEGREE)
class disentangle_model_vae_id(gcn_dis_model):
    def build_model(self, MAX_DEGREE):
        """Build the identity GCN-VAE graph, losses and train/test functions.

        Loads the mesh adjacency, converts it to rescaled-Laplacian Chebyshev
        bases of order MAX_DEGREE, then wires reconstruction, KL and weight
        regularization losses into Keras backend functions.
        """
        SYM_NORM = True
        A = sp.load_npz(('data/{}/FWH_adj_matrix.npz').format(self.prefix))
        L = normalized_laplacian(A, SYM_NORM)
        # Chebyshev polynomial basis of the rescaled Laplacian; support is the
        # number of basis matrices fed to the graph convolutions
        T_k = chebyshev_polynomial(rescale_laplacian(L), MAX_DEGREE)
        support = MAX_DEGREE + 1
        self.kl_loss, self.encoder, self.decoder, self.gcn_vae_id = get_gcn_vae_id(T_k, support, batch_size=self.batch_size, feature_dim=self.feature_dim, v=self.v, input_dim=self.input_dim, latent_dim = self.latent_dim_id)
        # placeholder for the target (neutral) face the VAE should reconstruct
        self.neutral_face = Input(shape=(self.input_dim,))
        real = self.gcn_vae_id.get_input_at(0)
        # per-dimension normalization range, used to weight the error back
        # into the original data scale
        ratio = K.variable(self.M_list - self.m_list)
        if self.feature_dim == 9:
            # deformation-feature variant: mean absolute error
            self.id_loss = K.mean(K.abs((self.neutral_face - self.gcn_vae_id(real)) * ratio))/1.8
        else:
            # raw-coordinate variant: mean per-vertex Euclidean distance
            ori_mesh = K.reshape(((self.neutral_face - self.gcn_vae_id(real)) * ratio), (self.batch_size, -1, 3))
            self.id_loss = K.mean(K.sqrt(K.sum(K.square(ori_mesh) ,axis=-1)))/1.8
        weights = self.gcn_vae_id.trainable_weights#+[self.scalar]
        # L2 weight regularization; the non-9 branch uses a hard-coded factor
        self.regularization_loss = 0
        for w in weights:
            #print(w)
            if self.feature_dim == 9:
                self.regularization_loss += self.weight_decay* K.sum(K.square(w))
            else:
                self.regularization_loss += 0.00002* K.sum(K.square(w))
        self.loss = self.id_loss + self.kl_weight * self.kl_loss + self.regularization_loss
        self.opt = Adam(lr=self.lr)
        training_updates = (self.opt).get_updates(weights, [], self.loss)
        # train_func applies updates; test_func only evaluates the losses
        self.train_func = K.function([real, self.neutral_face], [self.id_loss, self.loss, self.kl_loss, self.regularization_loss], training_updates)
        self.test_func = K.function([real, self.neutral_face], [self.id_loss, self.loss, self.kl_loss, self.regularization_loss])
        if self.load:
            self.load_models()
def save_models(self):
self.gcn_vae_id.save_weights(('model/gcn_vae_id_model/gcn_vae_id{}{}.h5').format(self.prefix, self.suffix))
self.encoder.save_weights(('model/gcn_vae_id_model/encoder_id_{}{}.h5').format(self.prefix, self.suffix))
self.decoder.save_weights(('model/gcn_vae_id_model/decoder_id_{}{}.h5').format(self.prefix, self.suffix))
def load_models(self):
self.gcn_vae_id.load_weights(('model/gcn_vae_id_model/gcn_vae_id{}{}.h5').format(self.prefix, self.suffix))
    def code_bp(self, epoch):
        """Refine a latent code by back-propagating through the frozen decoder.

        Encodes one normalized test face, then optimizes the latent code (not
        the network weights) for ``epoch`` iterations per round, prompting the
        user interactively for a new learning rate between rounds (enter 0 to
        stop).  Writes start/result/target meshes to data/mesh/.
        """
        #test_array = np.vstack(batch_change(np.fromfile('data/disentangle/real_data/{}.dat'.format(i))) for i in range(287))
        # neutral faces only: every 47th sample is the neutral expression
        test_array = np.load('data/{}/test_data.npy'.format(self.prefix))[47*np.arange(10)]
        frt = np.loadtxt('src/front_part_v.txt', dtype = int)
        mask = np.zeros(11510)
        mask[frt] = 1
        normalize_fromfile(test_array, self.M_list, self.m_list)
        num = 0
        target_feature = test_array[num:num+1]
        #x = [0,2,6,7,8]
        K.set_learning_phase(0)
        start = self.encoder.predict(target_feature, batch_size = self.batch_size)
        # the latent code is the optimization variable
        code = K.variable(start[0])
        target_feature_holder = Input(shape=(self.input_dim, ))
        # NOTE(review): mask and cross_id are built here but not used in the
        # loss below -- presumably leftovers from a masked variant; confirm.
        mask = K.variable(np.repeat(mask, 9))
        ratio = K.variable(self.M_list - self.m_list)
        cross_id = K.variable(np.tile(np.array([1,0,0,1,0,1,0,0,0]), 11510))
        target = self.decoder(code)
        # reconstruction error in denormalized units
        loss = K.mean(K.abs(ratio*(target - target_feature_holder)))/1.8
        lr = self.lr
        for circle in range(10):
            training_updates = (Adam(lr=lr)).get_updates([code], [], loss)
            bp_func = K.function([target_feature_holder], [loss, target], training_updates)
            for i in range(epoch):
                err, result_mesh = bp_func([target_feature])
                print('Epoch: {}, loss: {}'.format(i,err))
            # interactive learning-rate adjustment; 0 terminates the loop
            lr = input('learning rate change? ')
            lr = float(lr)
            if lr == 0:
                break
        start_norm = self.decoder.predict(start[0],batch_size = self.batch_size)
        start_id = denormalize_fromfile(start_norm, self.M_list, self.m_list)
        result_id = denormalize_fromfile(result_mesh, self.M_list, self.m_list)
        denormalize_fromfile(target_feature, self.M_list, self.m_list)
        import shutil, os
        # recreate the output directory from scratch before export
        shutil.rmtree('data/mesh')
        os.mkdir('data/mesh')
        V2M2(get_mesh(ref_name, data_recover(start_id)), 'data/mesh/start_id.obj')
        V2M2(get_mesh(ref_name, data_recover(result_id)), 'data/mesh/result_id.obj')
        V2M2(get_mesh(ref_name, data_recover(target_feature)), 'data/mesh/target_id.obj')
    def train(self, epoch):
        """Train the identity VAE on real and interpolated faces.

        The dataset is organized as 47 expressions per identity, with index 0
        of each 47-block being the neutral face; the model learns to map any
        expressive face to its identity's neutral face.  Interpolated faces
        are trained with a larger learning rate, real faces with a smaller
        one.  Logs per-iteration reconstruction errors and saves weights after
        every epoch.
        """
        def get_interpolate_data(prefix, num = 2000):
            # load ``num`` interpolated faces and center them on their mean
            if prefix == 'disentangle':
                #interpolate_data = np.vstack(batch_change(np.fromfile('data/{}/real_data/{}.dat'.format(prefix, i))) for i in range(num))
                interpolate_data = np.vstack(batch_change(np.fromfile('data/{}/Interpolated_results/interpolated_{}.dat'.format(prefix, i))) for i in range(num))
            else:
                interpolate_data = np.vstack(np.fromfile('data/{}/Interpolated_results/interpolated_{}.dat'.format(prefix, i)) for i in range(num))
            mean_inter = np.mean(interpolate_data, axis = 0)
            interpolate_data = interpolate_data - mean_inter
            return interpolate_data
        inter_array = get_interpolate_data(self.prefix, 4000)
        data_array = np.load(('data/{}/train_data.npy').format(self.prefix))
        test_array = np.load(('data/{}/test_data.npy').format(self.prefix))
        mean_exp = np.load(('data/{}/MeanFace_data.npy').format(self.prefix))
        # normalize everything into the model's [-1, 1]-style range in place
        normalize_fromfile(test_array, self.M_list, self.m_list)
        normalize_fromfile(mean_exp, self.M_list, self.m_list)
        normalize_fromfile(data_array, self.M_list, self.m_list)
        normalize_fromfile(inter_array, self.M_list, self.m_list)
        ITS = data_array.shape[0]//self.batch_size
        log = np.zeros((epoch*ITS,))
        test_log = np.zeros((epoch*ITS,))
        constant_list = np.arange(data_array.shape[0])
        inter_list = np.arange(inter_array.shape[0])
        display_step = 50
        for i in range(epoch):
            np.random.shuffle(constant_list)
            np.random.shuffle(inter_list)
            # for index, j in enumerate(constant_list):
            # iterate in batches of batch_size shuffled sample indices
            for index, j in enumerate(zip(*[iter(constant_list)]*self.batch_size)):
                # l = np.random.randint(0, 47)
                # NOTE(review): l and inter_sample are computed but unused
                l = np.random.randint(0,47,self.batch_size)
                inter_sample = np.random.randint(0,47,self.batch_size) if False else np.random.randint(0,inter_array.shape[0],self.batch_size)
                #l = 1
                j = np.array(j)
                # neutral face of each sample's identity sits at index j - (j % 47)
                C_exp = j % 47
                C_neutral = j - C_exp
                people_with_emotion = data_array[j]
                people_neutral_face = data_array[C_neutral]
                C_int = inter_list[(index*self.batch_size) %inter_array.shape[0]: (index * self.batch_size)%inter_array.shape[0]+self.batch_size]
                inter_people = inter_array[C_int]
                # synthesize expressive versions of the interpolated identities
                m = np.random.randint(0, 47, inter_people.shape[0])
                inter_people_emotion = inter_people + mean_exp[m] + 0.9*(self.M_list + self.m_list)/(self.M_list - self.m_list)
                K.set_learning_phase(1)
                # interpolated data: boosted lr; real data: reduced lr
                K.set_value(self.opt.lr, self.lr*10)
                err_re_inter, err_total_inter, err_kl, err_regular = self.train_func([inter_people_emotion, inter_people])
                K.set_value(self.opt.lr, self.lr*0.1)
                err_re_emoti, err_total_emoti, err_kl, err_regular = self.train_func([people_with_emotion, people_neutral_face])
                err_re = err_re_emoti#(err_re_inter + err_re_emoti)/2
                err_total = (err_total_inter + err_total_emoti)/2
                # evaluate on random test expressive/neutral pairs
                k = np.random.randint(0, 10*47,self.batch_size)
                test_emotion = test_array[k]
                test_neutral = test_array[k-(k%47)]
                K.set_learning_phase(0)
                eval_re, eval_total, eval_kl, eval_regular = self.test_func([test_emotion, test_neutral])
                if index%display_step == 0:
                    print(('Epoch: {:3}, total_loss: {:8.4f}, re_loss: {:8.4f}, kl_loss: {:8.4f}, regular: {:8.4f}, eval: {:8.4f}, eval_re: {:8.4f}, eval_kl: {:8.4f}').format(i, err_total, err_re, err_kl, err_regular, eval_total, eval_re, eval_kl))
                log[i*ITS + index] += err_re
                test_log[i*ITS + index] += eval_re
            np.save('log', log)
            np.save('testlog', test_log)
            self.save_models()
def special_train(self, epoch):
    """Autoencoder-style training on real + interpolated samples.

    Each input is reconstructed as its own target (input == target), unlike
    the disentangling `train`.  Writes 'log.npy'/'testlog.npy' every epoch
    and saves model weights at the end.

    Args:
        epoch: number of passes over the shuffled training set.
    """
    def get_interpolate_data(prefix, num=2000):
        # Load `num` interpolated feature files and center them on their mean.
        # BUGFIX: np.vstack requires a sequence; passing a bare generator is
        # deprecated and rejected by modern NumPy, so build explicit lists.
        if prefix == 'disentangle':
            interpolate_data = np.vstack([batch_change(np.fromfile('data/{}/Interpolated_results/interpolated_{}.dat'.format(prefix, i))) for i in range(num)])
        else:
            interpolate_data = np.vstack([np.fromfile('data/{}/real_data/{}.dat'.format(prefix, i)) for i in range(num)])
        mean_inter = np.mean(interpolate_data, axis=0)
        interpolate_data = interpolate_data - mean_inter
        return interpolate_data
    # Every 47th row is expression 0, i.e. one neutral face per identity.
    # NOTE(review): assumes 140 train / 10 test identities — confirm vs data.
    data_array = np.load(('data/{}/train_data.npy').format(self.prefix))[47 * np.arange(140)]
    test_array = np.load(('data/{}/test_data.npy').format(self.prefix))[47 * np.arange(10)]
    inter_array = get_interpolate_data(self.prefix)
    normalize_fromfile(inter_array, self.M_list, self.m_list)
    normalize_fromfile(data_array, self.M_list, self.m_list)
    normalize_fromfile(test_array, self.M_list, self.m_list)
    data_array = np.concatenate([data_array, inter_array])
    log = np.zeros((epoch,))
    test_log = np.zeros((epoch,))
    constant_list = np.arange(data_array.shape[0])
    display_step = 50
    for i in range(epoch):
        np.random.shuffle(constant_list)
        # zip(*[iter(...)]*batch_size) yields full batches only (ragged tail
        # of the shuffled index list is dropped).
        for index, j in enumerate(zip(*[iter(constant_list)] * self.batch_size)):
            test_idx = np.random.randint(0, 10, self.batch_size)
            test_emotion = test_array[test_idx]
            people_with_emotion = data_array[np.array(j)]
            K.set_learning_phase(1)
            err_re, err_total, err_kl, err_regular = self.train_func([people_with_emotion, people_with_emotion])
            K.set_learning_phase(0)
            eval_re, eval_total, eval_kl, eval_regular = self.test_func([test_emotion, test_emotion])
            if index % display_step == 0:
                print(('Epoch: {:3}, total_loss: {:8.4f}, re_loss: {:8.4f}, kl_loss: {:8.4f}, regular: {:8.4f}, eval: {:8.4f}, eval_re: {:8.4f}, eval_kl: {:8.4f}').format(i, err_total, err_re, err_kl, err_regular, eval_total, eval_re, eval_kl))
            log[i] += err_total
            test_log[i] += eval_total
        np.save('log', log)
        np.save('testlog', test_log)
    self.save_models()
def test(self, limit=5, filename='test', people_id=142):
    """Reconstruct selected expressions of one subject and dump OBJ meshes.

    Writes reconstructed ('id_*') and original ('ori_*') meshes under
    data/mesh.  `limit` is accepted for signature compatibility but unused.
    """
    data = np.load(('data/{}/{}_data/Feature{}.npy').format(self.prefix, filename, people_id))
    data_array = data.copy()
    normalize_fromfile(data_array, self.M_list, self.m_list)
    # Spot-check one (expression, neutral) pair and report its rec. error.
    err_re, err_total, err_kl, _ = self.test_func([data_array[24:25], data_array[:1]])
    print(err_re)
    feature_id = denormalize_fromfile(self.gcn_vae_id.predict(data_array, batch_size=self.batch_size), self.M_list, self.m_list)
    import shutil, os
    # BUGFIX: recreate the output directory; ignore_errors avoids crashing on
    # the first run when data/mesh does not exist yet.
    shutil.rmtree('data/mesh', ignore_errors=True)
    os.mkdir('data/mesh')
    # A fixed sample of expression indices to export.
    for i in (0, 1, 2, 22, 24, 25, 37, 39):
        V2M2(get_mesh(ref_name, data_recover(feature_id[i])), ('data/mesh/id_{}_{}.obj').format(self.prefix, i))
        V2M2(get_mesh(ref_name, data_recover(data[i])), ('data/mesh/ori_{}_{}.obj').format(self.prefix, i))
class disentangle_model_vae_exp(gcn_dis_model):
    """Graph-convolutional VAE trained to predict the expression component.

    The network output is compared against the mean face of the ground-truth
    expression (mean_exp), weighted by the per-feature de-normalization
    range (M_list - m_list).
    """
    def build_model(self, MAX_DEGREE):
        # Chebyshev polynomial basis of the rescaled normalized Laplacian.
        SYM_NORM = True
        A = sp.load_npz(('data/{}/FWH_adj_matrix.npz').format(self.prefix))
        L = normalized_laplacian(A, SYM_NORM)
        T_k = chebyshev_polynomial(rescale_laplacian(L), MAX_DEGREE)
        support = MAX_DEGREE + 1
        self.kl_loss, self.encoder, self.decoder, self.gcn_vae_exp = get_gcn_vae_exp(T_k, support, batch_size=self.batch_size, feature_dim=self.feature_dim, v=self.v, input_dim=self.input_dim, latent_dim = self.latent_dim_exp)
        # Placeholder for the target: mean face of the sample's expression.
        self.mean_exp = Input(shape=(self.input_dim,))
        real = self.gcn_vae_exp.get_input_at(0)
        # Per-feature de-normalization range used to weight the error.
        ratio = K.variable(self.M_list - self.m_list)
        # L2 when xyz, L1 when rimd
        if self.feature_dim == 9:
            #self.away_loss = 0.001/K.mean(K.abs(0.9*s- ( self.gcn_vae_exp(real)) * ratio))
            self.exp_loss = K.mean(K.abs((self.mean_exp - self.gcn_vae_exp(real)) * ratio )) / 1.8 #+ self.away_loss
        else:
            self.exp_loss = K.mean(K.square((self.mean_exp - self.gcn_vae_exp(real)) * ratio )) * 100
        self.loss = self.exp_loss + self.kl_weight * self.kl_loss
        weights = self.gcn_vae_exp.trainable_weights
        training_updates = (Adam(lr=self.lr)).get_updates(weights, [], self.loss)
        # train_func applies gradient updates; test_func only evaluates.
        self.train_func = K.function([real, self.mean_exp], [self.exp_loss, self.loss, self.kl_loss], training_updates)
        self.test_func = K.function([real, self.mean_exp], [self.exp_loss, self.loss, self.kl_loss])
        if self.load:
            self.load_models()
    def save_models(self):
        # Persist the full model plus separate encoder/decoder weight files.
        self.gcn_vae_exp.save_weights(('model/gcn_vae_exp_model/gcn_vae_exp{}{}.h5').format(self.prefix, self.suffix))
        self.encoder.save_weights(('model/gcn_vae_exp_model/encoder_exp_{}{}.h5').format(self.prefix, self.suffix))
        self.decoder.save_weights(('model/gcn_vae_exp_model/decoder_exp_{}{}.h5').format(self.prefix, self.suffix))
    def load_models(self):
        # Only the full model is restored; encoder/decoder share its layers.
        self.gcn_vae_exp.load_weights(('model/gcn_vae_exp_model/gcn_vae_exp{}{}.h5').format(self.prefix, self.suffix))
    def train(self, epoch):
        """Fit the expression VAE sample-by-sample.

        Each sample's target is the mean face of its expression index
        (data is identity-major with 47 expressions per identity).
        """
        data_array = np.load(('data/{}/train_data.npy').format(self.prefix))
        test_array = np.load(('data/{}/test_data.npy').format(self.prefix))
        mean_exp = np.load(('data/{}/MeanFace_data.npy').format(self.prefix))
        normalize_fromfile(mean_exp, self.M_list, self.m_list)
        normalize_fromfile(data_array, self.M_list, self.m_list)
        normalize_fromfile(test_array, self.M_list, self.m_list)
        log = np.zeros((epoch,))
        test_log = np.zeros((epoch,))
        # 6580 = 140 identities * 47 expressions — TODO confirm vs data files.
        constant_list = np.arange(6580)
        for i in range(epoch):
            # Pick one of the 10 test identities for this epoch's evaluation.
            k = np.random.randint(1, 11)
            test_emotion = test_array[k * 47 - 47:k * 47]
            np.random.shuffle(constant_list)
            for j in constant_list:
                # Random test expression index for the held-out evaluation.
                l = np.random.randint(0, 47)
                C_exp = j % 47
                people_with_emotion = data_array[j:j + 1]
                exp = mean_exp[C_exp:C_exp+1]
                err_re, err_total, err_kl = self.train_func([people_with_emotion, exp])
                eval_re, eval_total, eval_kl = self.test_func([test_emotion[l:l + 1], mean_exp[l:l+1]])
                print(('Epoch: {:3}, people: {:4}, total_loss: {:8.6f}, re_loss: {:8.6f}, kl_loss: {:8.4f}, eval: {:8.6f}, eval_re: {:8.6f}, eval_kl: {:8.4f}').format(i, j, err_total, err_re, err_kl, eval_total, eval_re, eval_kl))
                log[i] += err_total
                test_log[i] += eval_total
            np.save('log', log)
# Double pendulum formula translated from the C code at
# http://www.physics.usyd.edu.au/~wheat/dpend_html/solve_dpend.c
from numpy import sin, cos
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import matplotlib.animation as animation
# Physical parameters of the double pendulum (SI units).
G = 9.81  # acceleration due to gravity, in m/s^2
L1 = 1.0  # length of pendulum 1 in m
L2 = 1.0  # length of pendulum 2 in m
M1 = 1.0  # mass of pendulum 1 in kg
M2 = 1.0  # mass of pendulum 2 in kg
def derivs(state, t, G=9.81, L1=1.0, L2=1.0, M1=1.0, M2=1.0):
    """Right-hand side of the double-pendulum ODE.

    Generalized to accept the physical constants as keyword parameters;
    the defaults mirror the module-level constants, so existing callers
    (``odeint(derivs, state, t)``) are unaffected.

    Args:
        state: [theta1, omega1, theta2, omega2] (radians, radians/s).
        t: time (unused; required by the odeint callback signature).
        G: gravitational acceleration (m/s^2).
        L1, L2: pendulum lengths (m).
        M1, M2: bob masses (kg).

    Returns:
        d(state)/dt as an array of the same shape as `state`.
    """
    dydx = np.zeros_like(state)
    dydx[0] = state[1]
    del_ = state[2] - state[0]  # angle between the two rods
    den1 = (M1 + M2)*L1 - M2*L1*cos(del_)*cos(del_)
    dydx[1] = (M2*L1*state[1]*state[1]*sin(del_)*cos(del_) +
               M2*G*sin(state[2])*cos(del_) +
               M2*L2*state[3]*state[3]*sin(del_) -
               (M1 + M2)*G*sin(state[0]))/den1
    dydx[2] = state[3]
    den2 = (L2/L1)*den1
    dydx[3] = (-M2*L2*state[3]*state[3]*sin(del_)*cos(del_) +
               (M1 + M2)*G*sin(state[0])*cos(del_) -
               (M1 + M2)*L1*state[1]*state[1]*sin(del_) -
               (M1 + M2)*G*sin(state[2]))/den2
    return dydx
# create a time array from 0..100 sampled at dt-second steps
dt = 0.01
t = np.arange(0.0, 100.0, dt)
# th1 and th2 are the initial angles (degrees)
# w10 and w20 are the initial angular velocities (degrees per second)
th1 = 120.0
w1 = 0.0
th2 = -10.0
w2 = 0.0
# initial state (converted to radians)
state = np.radians([th1, w1, th2, w2])
# integrate your ODE using scipy.integrate.
y = integrate.odeint(derivs, state, t)
# Cartesian positions of the two bobs at every time step (T x 2 each).
P1 = np.dstack([L1*sin(y[:, 0]), -L1*cos(y[:, 0])]).squeeze()
P2 = P1 + np.dstack([L2*sin(y[:, 2]), -L2*cos(y[:, 2])]).squeeze()
fig = plt.figure(figsize=(5,5), facecolor=".85")
ax = plt.axes([0,0,1,1], frameon=False)
#subplot(1,1,1, aspect=1, frameon = False, xlim=(-2, 2), ylim=(-2, 2))
# Trail of n points whose alpha fades from transparent (oldest) to opaque.
n = 250
colors = np.zeros((n,4))
colors[:,3] = np.linspace(0, 1, n, endpoint=True)
scatter = ax.scatter( | np.zeros(n) | numpy.zeros |
import os, sys, trimesh, matplotlib.pyplot as pyplot, numpy as np, time, random, progressbar, json
from plyfile import PlyData, PlyElement
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.6f')
from subprocess import call
from collections import deque
from imageio import imread
# Fixed RGB palette (floats in [0, 1]) cycled through by export_ply_with_label.
# NOTE(review): [0.6, 0, 0.3] and [0.3, 0, 0.6] each appear twice — possibly
# unintentional duplicates; confirm before relying on 13 distinct colors.
colors = [[0, 0, 1], [1, 0, 0], [0, 1, 0],
          [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
          [0.3, 0.6, 0], [0.6, 0, 0.3], [0.3, 0, 0.6],
          [0.6, 0.3, 0], [0.3, 0, 0.6], [0.6, 0, 0.3],
          [0.8, 0.2, 0.5]]
# Make sibling modules importable regardless of the caller's working dir.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# ----------------------------------------
# Point Cloud Sampling
# ----------------------------------------
def random_sampling(pc, num_sample, replace=None, return_choices=False):
    """Randomly pick `num_sample` rows from an NxC point array.

    When `replace` is None, sampling is with replacement only if the cloud
    has fewer points than requested.  Optionally also returns the chosen
    row indices.
    """
    if replace is None:
        replace = pc.shape[0] < num_sample
    idx = np.random.choice(pc.shape[0], num_sample, replace=replace)
    sampled = pc[idx]
    return (sampled, idx) if return_choices else sampled
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
    """Voxelize a BxNx3 batch of point clouds.

    Returns Bx(vsize^3) when `flatten`, else BxVxVxVx1.
    """
    vols = [point_cloud_to_volume(np.squeeze(point_clouds[b, :, :]), vsize, radius)
            for b in range(point_clouds.shape[0])]
    if flatten:
        return np.vstack([vol.flatten() for vol in vols])
    return np.concatenate([np.expand_dims(np.expand_dims(vol, -1), 0) for vol in vols], 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
    """Voxelize Nx3 points into a vsize^3 occupancy grid.

    Assumes coordinates lie in [-radius, radius].  Occupied voxels are 1.0,
    the rest 0.0.
    """
    vol = np.zeros((vsize, vsize, vsize))
    voxel = 2 * radius / float(vsize)
    locations = ((points + radius) / voxel).astype(int)
    # BUGFIX: a point lying exactly on the +radius boundary maps to index
    # vsize and previously raised IndexError; clamp boundary points into the
    # edge voxel instead.
    locations = np.clip(locations, 0, vsize - 1)
    vol[locations[:, 0], locations[:, 1], locations[:, 2]] = 1.0
    return vol
def volume_to_point_cloud(vol):
    """Return the Nx3 integer coordinates of occupied voxels (value == 1).

    `vol` is a vsize^3 occupancy grid.  An empty grid yields a (0, 3) array.
    """
    vsize = vol.shape[0]
    # BUGFIX: the original asserted shape[1] twice and never validated
    # shape[2]; check all three dimensions are equal.
    assert (vol.shape[1] == vsize and vol.shape[2] == vsize)
    # argwhere enumerates indices in the same lexicographic (C) order as the
    # original triple loop.
    points = np.argwhere(vol == 1)
    if points.shape[0] == 0:
        # Preserve the historical empty result: float zeros of shape (0, 3).
        return np.zeros((0, 3))
    return points
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
    """Apply point_cloud_to_volume_v2 to each cloud in a BxNx3 batch.

    Returns BxVxVxVxnum_samplex3.
    """
    return np.concatenate(
        [np.expand_dims(point_cloud_to_volume_v2(point_clouds[b, :, :], vsize, radius, num_sample), 0)
         for b in range(point_clouds.shape[0])], 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
    """Voxelize Nx3 points, keeping num_sample locally-normalized points per voxel.

    Assumes coordinates lie in [-radius, radius].  Voxels with more than
    num_sample points are subsampled; voxels with fewer are edge-padded;
    empty voxels stay all-zero.  Returns VxVxVxnum_samplex3.
    """
    vol = np.zeros((vsize, vsize, vsize, num_sample, 3))
    voxel = 2 * radius / float(vsize)
    locations = ((points + radius) / voxel).astype(int)
    # Group point rows by their voxel index.
    loc2pc = {}
    for idx in range(points.shape[0]):
        loc2pc.setdefault(tuple(locations[idx, :]), []).append(points[idx, :])
    for i in range(vsize):
        for j in range(vsize):
            for k in range(vsize):
                if (i, j, k) not in loc2pc:
                    continue  # empty cell: already zeros
                pc = np.vstack(loc2pc[(i, j, k)])  # kx3
                if pc.shape[0] > num_sample:
                    pc = random_sampling(pc, num_sample, False)
                elif pc.shape[0] < num_sample:
                    pc = np.lib.pad(pc, ((0, num_sample - pc.shape[0]), (0, 0)), 'edge')
                # Shift to the voxel center and scale to voxel units.
                center = (np.array([i, j, k]) + 0.5) * voxel - radius
                vol[i, j, k, :, :] = (pc - center) / voxel
    return vol
def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):
    """Apply point_cloud_to_image to each cloud in a BxNx3 batch.

    Returns BxIxIxnum_samplex3.
    """
    return np.concatenate(
        [np.expand_dims(point_cloud_to_image(point_clouds[b, :, :], imgsize, radius, num_sample), 0)
         for b in range(point_clouds.shape[0])], 0)
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
    """Bin Nx3 points into an imgsize x imgsize x-y grid.

    Each pixel keeps num_sample points whose x-y coordinates are normalized
    to the pixel (subsampled / edge-padded as needed); z is left unchanged.
    Assumes coordinates lie in [-radius, radius].  Empty pixels are zero.
    """
    img = np.zeros((imgsize, imgsize, num_sample, 3))
    pixel = 2 * radius / float(imgsize)
    locations = ((points[:, 0:2] + radius) / pixel).astype(int)  # Nx2
    # Group point rows by their pixel index.
    loc2pc = {}
    for idx in range(points.shape[0]):
        loc2pc.setdefault(tuple(locations[idx, :]), []).append(points[idx, :])
    for i in range(imgsize):
        for j in range(imgsize):
            if (i, j) not in loc2pc:
                continue  # empty pixel: already zeros
            pc = np.vstack(loc2pc[(i, j)])
            if pc.shape[0] > num_sample:
                pc = random_sampling(pc, num_sample, False)
            elif pc.shape[0] < num_sample:
                pc = np.lib.pad(pc, ((0, num_sample - pc.shape[0]), (0, 0)), 'edge')
            # Only x-y are shifted/scaled into pixel-local coordinates.
            center = (np.array([i, j]) + 0.5) * pixel - radius
            pc[:, 0:2] = (pc[:, 0:2] - center) / pixel
            img[i, j, :, :] = pc
    return img
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
    """Read the XYZ vertex coordinates of a PLY file into an Nx3 array."""
    vertex_data = PlyData.read(filename)['vertex'].data
    return np.array([[vx, vy, vz] for vx, vy, vz in vertex_data])
def write_ply(points, filename, text=True):
    """Write Nx3 `points` to `filename` in PLY format (ASCII when `text`)."""
    vertex = np.array(
        [(points[i, 0], points[i, 1], points[i, 2]) for i in range(points.shape[0])],
        dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    element = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
    PlyData([element], text=text).write(filename)
def write_ply_color(points, labels, filename, num_classes=None, colormap=pyplot.cm.jet):
    """Write (N,3) points colored by their integer label to an ASCII PLY file.

    Labels must lie in 0 .. num_classes-1.  Returns the per-class colormap
    colors used (RGBA floats).
    """
    labels = labels.astype(int)
    if num_classes is None:
        num_classes = np.max(labels) + 1
    else:
        assert (num_classes > np.max(labels))
    # One colormap sample per class (shadows the module palette on purpose,
    # as in the original).
    class_colors = [colormap(i / float(num_classes)) for i in range(num_classes)]
    vertex = []
    for i in range(points.shape[0]):
        byte_color = [int(ch * 255) for ch in class_colors[labels[i]]]
        vertex.append((points[i, 0], points[i, 1], points[i, 2],
                       byte_color[0], byte_color[1], byte_color[2]))
    vertex = np.array(vertex,
                      dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
    el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
    PlyData([el], text=True).write(filename)
    return class_colors
def merge_mesh_with_color(meshes):
    """Concatenate trimesh meshes into one, preserving face and vertex colors.

    Face indices of each mesh are offset by the vertex count of all meshes
    before it.
    """
    vertex_counts = [mesh.vertices.shape[0] for mesh in meshes]
    offsets = np.insert(np.cumsum(vertex_counts), 0, 0)[:-1]
    vertices = np.vstack([mesh.vertices for mesh in meshes])
    faces = np.vstack([mesh.faces + offset for mesh, offset in zip(meshes, offsets)])
    vertex_colors = np.vstack([mesh.visual.vertex_colors for mesh in meshes])
    face_colors = np.vstack([mesh.visual.face_colors for mesh in meshes])
    return trimesh.Trimesh(vertices, faces, face_colors=face_colors, vertex_colors=vertex_colors)
def write_ply_bbox_color(vertices, vertex_colors, edges, edge_colors, filename, num_classes=None, colormap=pyplot.cm.jet):
    """Write a PLY with colored vertices plus colored edge records.

    `num_classes` and `colormap` are accepted for signature compatibility
    but unused.
    """
    vertex_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                    ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
    vertex = np.array(
        [(vertices[i, 0], vertices[i, 1], vertices[i, 2],
          vertex_colors[i, 0], vertex_colors[i, 1], vertex_colors[i, 2])
         for i in range(len(vertices))], dtype=vertex_dtype)
    edge_dtype = [('vertex1', 'i4'), ('vertex2', 'i4'),
                  ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
    edge = np.array(
        [(edges[i, 0], edges[i, 1],
          edge_colors[i, 0], edge_colors[i, 1], edge_colors[i, 2])
         for i in range(len(edges))], dtype=edge_dtype)
    e1 = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
    e2 = PlyElement.describe(edge, 'edge', comments=['edges'])
    PlyData([e1, e2], text=True).write(filename)
def write_bbox_color_json(scene_bbox, label, out_filename, num_classes=None, colormap=pyplot.cm.jet):
    """Dump boxes as JSON rows [cx, cy, cz, dx, dy, dz, r, g, b].

    Returns the mapping label -> RGBA byte color actually used.
    """
    labels = label.astype(int)
    if num_classes is None:
        num_classes = np.max(labels) + 1
    else:
        assert (num_classes > np.max(labels))
    palette = [colormap(i / float(num_classes)) for i in range(num_classes)]
    used_color = {}
    rows = []
    for i, box in enumerate(scene_bbox):
        c = (np.array(palette[label[i]]) * 255).astype(np.uint8)
        rows.append([float(box[0]), float(box[1]), float(box[2]),
                     float(box[3]), float(box[4]), float(box[5]),
                     int(c[0]), int(c[1]), int(c[2])])
        used_color[label[i]] = c
    with open(out_filename, 'w') as f:
        json.dump(rows, f)
    return used_color
def write_bbox_color(scene_bbox, label, out_filename, num_classes=None, colormap=pyplot.cm.jet, edge=False):
    """Export scene bbox to meshes
    Args:
        scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
        label: (N,) integer class per box; selects the colormap color.
        out_filename: (string) filename
        num_classes: total classes; inferred from labels when None.
        edge: when True, write only sharp box edges (as colored PLY edges)
            instead of the solid box mesh.
    Returns the list of RGBA colors used, one per box.
    Note:
        To visualize the boxes in MeshLab.
        1. Select the objects (the boxes)
        2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
        3. Select Wireframe view.
    """
    labels = label.astype(int)
    if num_classes is None:
        num_classes = np.max(labels) + 1
    else:
        assert (num_classes > np.max(labels))
    def convert_box_to_trimesh_fmt(box, color):
        # Translate a box of the requested lengths to the box center and
        # paint every face/vertex with the class color.
        ctr = box[:3]
        lengths = box[3:]
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[3, 3] = 1.0
        mesh = trimesh.creation.box(lengths, trns)
        color = np.array(color) * 255
        face_colors = np.array([color] * mesh.faces.shape[0], np.uint8)
        vertex_colors = np.array([color] * mesh.vertices.shape[0], np.uint8)
        #print(face_colors, vertex_colors, box_trimesh_fmt.vertices, box_trimesh_fmt.faces)
        #exit(0)
        box_visual = trimesh.visual.create_visual(
            vertex_colors=vertex_colors,
            face_colors=face_colors,
            mesh=mesh)
        mesh.visual = box_visual
        # print(edges.shape)
        # exit(0)
        # print(box_trimesh_fmt.visual.face_colors)
        #print(face_colors)
        #print(box_visual.__dict__)
        #print(box_trimesh_fmt.visual.__dict__)
        #exit(0)
        #, facecolors=color, vertex_color=color)
        #print(box_trimesh_fmt.__dict__)
        #exit(0)
        return mesh
    colors = [colormap(i / float(num_classes)) for i in range(num_classes)]
    scene = []
    ret = []
    for i, box in enumerate(scene_bbox):
        ret.append(colors[label[i]])
        scene.append(convert_box_to_trimesh_fmt(box, colors[label[i]]))
    mesh = merge_mesh_with_color(scene)
    if edge:
        # Keep only edges between faces meeting at > 40 degrees, i.e. the
        # 12 sharp edges of each box.
        sharp = mesh.face_adjacency_angles > np.radians(40)
        edges = mesh.face_adjacency_edges[sharp]
        assert edges.shape[0] % 12 == 0
        edge_colors = mesh.visual.vertex_colors[edges[:, 0]]
        #print(edges.shape, edge_colors.shape)
        #exit(0)
        write_ply_bbox_color(mesh.vertices, mesh.visual.vertex_colors, edges, edge_colors, out_filename)
    else:
        trimesh.exchange.export.export_mesh(mesh, out_filename, file_type='ply')
    #print(mesh_list.visual.mesh.visual.__dict__)
    # save to ply file
    # ply = trimesh.exchange.ply.export_ply(mesh_list, encoding='ascii')
    #trimesh.exchange.export.export_mesh(mesh_list, out_filename, file_type='ply') #, encoding='ascii')
    # print(ply)
    # exit(0)
    # out_filename
    return ret
def write_ply_rgb(points, colors, out_filename, num_classes=None):
    """Dump N points with RGB colors (0-255) as OBJ-style 'v' lines.

    `num_classes` is accepted for signature compatibility but unused.
    """
    colors = colors.astype(int)
    with open(out_filename, 'w') as fout:
        for i in range(points.shape[0]):
            fout.write('v %f %f %f %d %d %d\n' % (
                points[i, 0], points[i, 1], points[i, 2],
                colors[i, 0], colors[i, 1], colors[i, 2]))
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def pyplot_draw_point_cloud(points, output_filename):
    """Scatter-plot an Nx3 array in 3D and save the figure to a file."""
    import matplotlib.pyplot as plt
    axes3d = plt.figure().add_subplot(111, projection='3d')
    axes3d.scatter(points[:, 0], points[:, 1], points[:, 2])
    axes3d.set_xlabel('x')
    axes3d.set_ylabel('y')
    axes3d.set_zlabel('z')
    pyplot.savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
    """Render a vsize^3 occupancy grid by scattering its occupied voxels."""
    pyplot_draw_point_cloud(volume_to_point_cloud(vol), output_filename)
# ----------------------------------------
# Simple Point manipulations
# ----------------------------------------
def rotate_point_cloud(points, rotation_matrix=None):
    """Rotate (n,3) points about their centroid; returns (points, matrix).

    A random Z-axis rotation matrix is drawn when none is supplied.
    """
    if rotation_matrix is None:
        angle = np.random.uniform() * 2 * np.pi
        s, c = np.sin(angle), np.cos(angle)
        rotation_matrix = np.array([[c, s, 0],
                                    [-s, c, 0],
                                    [0, 0, 1]])
    centroid = points.mean(axis=0)
    rotated = np.dot(points - centroid, rotation_matrix) + centroid
    return rotated, rotation_matrix
def rotate_pc_along_y(pc, rot_angle):
    """Rotate NxC points (first 3 channels XYZ) about the Y axis, in place.

    z faces forward, x leftward, y downward; returns the mutated array.
    """
    c = np.cos(rot_angle)
    s = np.sin(rot_angle)
    rot = np.array([[c, -s], [s, c]])
    pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], rot.T)
    return pc
def roty(t):
    """Return the 3x3 rotation matrix about the y-axis by angle t (radians)."""
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, 0, s],
                     [0, 1, 0],
                     [-s, 0, c]])
def roty_batch(t):
    """Batched rotation about the y-axis.

    t: array of shape (x1, ..., xn); returns shape (x1, ..., xn, 3, 3).
    """
    c = np.cos(t)
    s = np.sin(t)
    zero = np.zeros_like(c)
    one = np.ones_like(c)
    # Assemble rows [c 0 s], [0 1 0], [-s 0 c] along two trailing axes.
    return np.stack([np.stack([c, zero, s], -1),
                     np.stack([zero, one, zero], -1),
                     np.stack([-s, zero, c], -1)], -2)
def rotz(t):
    """Return the 3x3 rotation matrix about the z-axis by angle t (radians)."""
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, -s, 0],
                     [s, c, 0],
                     [0, 0, 1]])
# ----------------------------------------
# BBox
# ----------------------------------------
def bbox_corner_dist_measure(crnr1, crnr2):
    """Similarity in [0, 1] between two boxes given their 8x3 corner arrays.

    Tries the four cyclic orderings of each box's two corner quads (0-3 and
    4-7), takes the smallest mean corner distance, and normalizes it by the
    mean main-diagonal length (corner 0 to corner 6) of the two boxes.
    """
    dist = sys.maxsize
    for y in range(4):
        # Cyclically shift corners 0-3 and 4-7 by y before matching.
        rows = ([(x + y) % 4 for x in range(4)] + [4 + (x + y) % 4 for x in range(4)])
        d_ = np.linalg.norm(crnr2[rows, :] - crnr1, axis=1).sum() / 8.0
        if d_ < dist:
            dist = d_
    u = sum([np.linalg.norm(x[0, :] - x[6, :]) for x in [crnr1, crnr2]]) / 2.0
    # BUGFIX: removed a stray debug print(measure) left in the original.
    return max(1.0 - dist / u, 0)
def point_cloud_to_bbox(points):
    """Axis-aligned bounding box of Nx3 (or batched BxNx3) points.

    Returns 6 values per cloud: center xyz followed by the 3 side lengths.
    """
    reduce_axis = len(points.shape) - 2  # points axis: 0 for single, 1 for batch
    lo = points.min(reduce_axis)
    hi = points.max(reduce_axis)
    return np.concatenate([(lo + hi) * 0.5, hi - lo], axis=reduce_axis)
def write_bbox(scene_bbox, out_filename):
    """Export scene bbox to meshes
    Args:
        scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
        out_filename: (string) filename
    Note:
        To visualize the boxes in MeshLab.
        1. Select the objects (the boxes)
        2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
        3. Select Wireframe view.
    """
    def convert_box_to_trimesh_fmt(box):
        # 4x4 homogeneous transform translating a unit box to the center.
        ctr = box[:3]
        lengths = box[3:]
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[3, 3] = 1.0
        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
        return box_trimesh_fmt
    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(convert_box_to_trimesh_fmt(box))
    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to ply file
    # BUGFIX: trimesh.io was removed from trimesh; use trimesh.exchange,
    # consistent with write_bbox_color in this module.
    trimesh.exchange.export.export_mesh(mesh_list, out_filename, file_type='ply')
    return
def write_oriented_bbox(scene_bbox, out_filename):
    """Export oriented (around Z axis) scene bbox to meshes
    Args:
        scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
            and heading angle around Z axis.
            Y forward, X right, Z upward. heading angle of positive X is 0,
            heading angle of positive Y is 90 degrees.
        out_filename: (string) filename
    """
    def heading2rotmat(heading_angle):
        # Rotation about +Z by heading_angle.
        # BUGFIX: removed a stray `pass` statement at the top of this body.
        rotmat = np.zeros((3, 3))
        rotmat[2, 2] = 1
        cosval = np.cos(heading_angle)
        sinval = np.sin(heading_angle)
        rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
        return rotmat
    def convert_oriented_box_to_trimesh_fmt(box):
        # Translate + rotate a unit box to the given center and heading.
        ctr = box[:3]
        lengths = box[3:6]
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[3, 3] = 1.0
        trns[0:3, 0:3] = heading2rotmat(box[6])
        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
        return box_trimesh_fmt
    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to ply file
    # BUGFIX: trimesh.io was removed from trimesh; use trimesh.exchange,
    # consistent with write_bbox_color in this module.
    trimesh.exchange.export.export_mesh(mesh_list, out_filename, file_type='ply')
    return
def write_oriented_bbox_camera_coord(scene_bbox, out_filename):
    """Export oriented (around Y axis) scene bbox to meshes
    Args:
        scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
            and heading angle around Y axis.
            Z forward, X rightward, Y downward. heading angle of positive X is 0,
            heading angle of negative Z is 90 degrees.
        out_filename: (string) filename
    """
    def heading2rotmat(heading_angle):
        # Rotation about +Y by heading_angle.
        # BUGFIX: removed a stray `pass` statement at the top of this body.
        rotmat = np.zeros((3, 3))
        rotmat[1, 1] = 1
        cosval = np.cos(heading_angle)
        sinval = np.sin(heading_angle)
        rotmat[0, :] = np.array([cosval, 0, sinval])
        rotmat[2, :] = np.array([-sinval, 0, cosval])
        return rotmat
    def convert_oriented_box_to_trimesh_fmt(box):
        # Translate + rotate a unit box to the given center and heading.
        ctr = box[:3]
        lengths = box[3:6]
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[3, 3] = 1.0
        trns[0:3, 0:3] = heading2rotmat(box[6])
        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
        return box_trimesh_fmt
    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to ply file
    # BUGFIX: trimesh.io was removed from trimesh; use trimesh.exchange,
    # consistent with write_bbox_color in this module.
    trimesh.exchange.export.export_mesh(mesh_list, out_filename, file_type='ply')
    return
def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64):
    """Create lines represented as cylinders connecting pairs of 3D points
    Args:
        pcl: (N x 2 x 3 numpy array): N pairs of xyz pos
        filename: (string) filename for the output mesh (ply) file
        rad: radius for the cylinder
        res: number of sections used to create the cylinder
    """
    scene = trimesh.scene.Scene()
    for src, tgt in pcl:
        # compute line
        vec = tgt - src
        M = trimesh.geometry.align_vectors([0, 0, 1], vec, False)
        vec = tgt - src  # compute again since align_vectors modifies vec in-place!
        M[:3, 3] = 0.5 * src + 0.5 * tgt  # place the cylinder at the midpoint
        height = np.sqrt(np.dot(vec, vec))
        scene.add_geometry(trimesh.creation.cylinder(radius=rad, height=height, sections=res, transform=M))
    mesh_list = trimesh.util.concatenate(scene.dump())
    # BUGFIX: trimesh.io was removed from trimesh; use trimesh.exchange,
    # consistent with write_bbox_color in this module.
    trimesh.exchange.export.export_mesh(mesh_list, '%s.ply' % (filename), file_type='ply')
def normalize_pts(pts):
    """Center points on their mean and scale so the farthest lies on the unit sphere.

    Returns a new float32 Nx3 array; the input is not modified.
    """
    normalized = np.array(pts, dtype=np.float32)
    normalized -= np.mean(normalized, axis=0)
    normalized /= np.sqrt(np.max(np.sum(normalized ** 2, axis=1)))
    return normalized
def load_obj(fn, no_normal=False):
    """Parse a Wavefront OBJ file.

    Returns a dict with float32 'vertices' (Nx3), int32 'faces' (Mx3,
    1-indexed as in the file), and — when present and not suppressed via
    `no_normal` — float32 'normals'.
    """
    with open(fn, 'r') as fin:
        lines = [line.rstrip() for line in fin]
    vertices, normals, faces = [], [], []
    for line in lines:
        if line.startswith('v '):
            vertices.append(np.float32(line.split()[1:4]))
        elif line.startswith('vn '):
            normals.append(np.float32(line.split()[1:4]))
        elif line.startswith('f '):
            # Keep only the vertex index of each 'v/vt/vn' face token.
            faces.append(np.int32([tok.split('/')[0] for tok in line.split()[1:4]]))
    mesh = dict()
    mesh['faces'] = np.vstack(faces)
    mesh['vertices'] = np.vstack(vertices)
    if (not no_normal) and len(normals) > 0:
        assert len(normals) == len(vertices), 'ERROR: #vertices != #normals'
        mesh['normals'] = np.vstack(normals)
    return mesh
def export_obj_submesh_label(obj_fn, label_fn):
    """Write one integer per face: how many 'g' group lines precede it.

    Faces before the first 'g' line get label 0.
    """
    with open(obj_fn, 'r') as fin:
        lines = [line.rstrip() for line in fin]
    face_ids = []
    cur_id = 0
    for line in lines:
        if line.startswith('f '):
            face_ids.append(cur_id)
        elif line.startswith('g '):
            cur_id += 1
    with open(label_fn, 'w') as fout:
        for fid in face_ids:
            fout.write('%d\n' % fid)
def load_obj_with_submeshes(fn):
    """Parse an OBJ whose 'g' lines delimit submeshes.

    Returns a dict with:
        names: submesh names in file order
        tot: number of submeshes
        vertices: {submesh_id: Kx3 array of only the vertices that submesh uses}
        faces: {submesh_id: Mx3 float array of 1-indexed faces remapped into
            the submesh's own vertex list}

    NOTE(review): the local vertex order comes from iterating a `set`, so it
    is CPython-implementation-dependent — confirm callers don't rely on it.
    """
    fin = open(fn, 'r')
    lines = [line.rstrip() for line in fin]
    fin.close()
    vertices = [];
    submesh_id = -1;
    submesh_names = [];
    faces = dict();
    for line in lines:
        if line.startswith('v '):
            vertices.append(np.float32(line.split()[1:4]))
        elif line.startswith('f '):
            # Face tokens reference the global 1-indexed vertex list.
            faces[submesh_id].append(np.int32([item.split('/')[0] for item in line.split()[1:4]]))
        elif line.startswith('g '):
            submesh_names.append(line.split()[1])
            submesh_id += 1
            faces[submesh_id] = []
    vertice_arr = np.vstack(vertices)
    mesh = dict()
    mesh['names'] = submesh_names
    mesh['tot'] = submesh_id + 1
    out_vertices = dict()
    out_faces = dict()
    for i in range(submesh_id + 1):
        data = np.vstack(faces[i]).astype(np.int32)
        # Global 0-indexed ids of the vertices this submesh actually uses.
        out_vertice_ids = np.array(list(set(data.flatten())), dtype=np.int32) - 1
        # Map global 1-indexed vertex id -> local 1-indexed id.
        vertice_map = {out_vertice_ids[x] + 1: x + 1 for x in range(len(out_vertice_ids))}
        out_vertices[i] = vertice_arr[out_vertice_ids, :]
        data = np.vstack(faces[i])
        cur_out_faces = np.zeros(data.shape, dtype=np.float32)
        for x in range(data.shape[0]):
            for y in range(data.shape[1]):
                cur_out_faces[x, y] = vertice_map[data[x, y]]
        out_faces[i] = cur_out_faces
    mesh['vertices'] = out_vertices
    mesh['faces'] = out_faces
    return mesh
def load_off(fn):
    """Parse an OFF mesh file.

    Returns a dict with float32 'vertices' (Nx3) and int32 'faces' (Mx3,
    converted from the file's 0-indexing to 1-indexing).
    """
    with open(fn, 'r') as fin:
        fin.readline()  # skip the 'OFF' header line
        counts = fin.readline().split()
        num_vertices = int(counts[0])
        num_faces = int(counts[1])
        vertices = np.zeros((num_vertices, 3)).astype(np.float32)
        for i in range(num_vertices):
            vertices[i, :] = np.float32(fin.readline().split())
        faces = np.zeros((num_faces, 3)).astype(np.int32)
        for i in range(num_faces):
            # Drop the leading vertex count; +1 shifts to 1-indexed faces.
            faces[i, :] = np.int32(fin.readline().split()[1:]) + 1
    return {'faces': faces, 'vertices': vertices}
def rotate_pts(pts, theta=0, phi=0):
    """Rotate Nx3 points first about the X axis by phi, then about Y by theta.

    Both angles are in degrees.  Returns a new array; `pts` is unchanged.
    """
    # BUGFIX: removed an unused pre-allocated `rotated_data` buffer that was
    # never read or written.
    # rotate along y-z axis (about X); phi degrees -> radians
    rotation_angle = phi / 90 * np.pi / 2
    cosval = np.cos(rotation_angle)
    sinval = np.sin(rotation_angle)
    rotation_matrix = np.array([[1, 0, 0],
                                [0, cosval, sinval],
                                [0, -sinval, cosval]])
    rotated_pts = np.dot(pts, rotation_matrix)
    # rotate along x-z axis (about Y); theta degrees -> radians
    rotation_angle = theta / 360 * 2 * np.pi
    cosval = np.cos(rotation_angle)
    sinval = np.sin(rotation_angle)
    rotation_matrix = np.array([[cosval, 0, sinval],
                                [0, 1, 0],
                                [-sinval, 0, cosval]])
    rotated_pts = np.dot(rotated_pts, rotation_matrix)
    return rotated_pts
def load_pts(fn):
    """Load whitespace-separated 'x y z' rows into an Nx3 float32 array."""
    with open(fn, 'r') as fin:
        rows = [line.rstrip() for line in fin]
    return np.array([[float(tok) for tok in row.split()[:3]] for row in rows],
                    dtype=np.float32)
def load_pts_nor(fn):
    """Load rows of 'x y z nx ny nz'; returns (points, normals) float32 arrays."""
    with open(fn, 'r') as fin:
        fields = [line.rstrip().split() for line in fin]
    pts = np.array([f[0:3] for f in fields], dtype=np.float32)
    nor = np.array([f[3:6] for f in fields], dtype=np.float32)
    return pts, nor
def load_label(fn):
    """Read one integer label per line into an int32 array."""
    with open(fn, 'r') as fin:
        return np.array([int(line.rstrip()) for line in fin], dtype=np.int32)
def export_obj(out, v, f):
    """Write vertices v (Nx3 floats) and faces f (Mx3, 1-indexed ints) as OBJ."""
    with open(out, 'w') as fout:
        for i in range(v.shape[0]):
            fout.write('v %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
        for i in range(f.shape[0]):
            fout.write('f %d %d %d\n' % (f[i, 0], f[i, 1], f[i, 2]))
def export_label(out, label):
    """Write one integer label per line."""
    with open(out, 'w') as fout:
        fout.writelines('%d\n' % val for val in label)
def export_pts(out, v):
    """Write Nx3 points as whitespace-separated 'x y z' rows."""
    with open(out, 'w') as fout:
        for i in range(v.shape[0]):
            fout.write('%f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
def export_pts_with_normal(out, v, n):
    """Write rows of 'x y z nx ny nz'; v and n must have equal row counts."""
    assert v.shape[0] == n.shape[0], 'v.shape[0] != v.shape[0]'
    with open(out, 'w') as fout:
        for i in range(v.shape[0]):
            fout.write('%f %f %f %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2],
                                                n[i, 0], n[i, 1], n[i, 2]))
def export_ply(out, v):
    """Write points to an ASCII PLY file (positions only)."""
    header = [
        'ply',
        'format ascii 1.0',
        'element vertex %d' % v.shape[0],
        'property float x',
        'property float y',
        'property float z',
        'end_header',
    ]
    with open(out, 'w') as handle:
        handle.write('\n'.join(header) + '\n')
        for row in v:
            handle.write('%f %f %f\n' % (row[0], row[1], row[2]))
def export_ply_with_label(out, v, l):
    """Write a colored ASCII PLY; each point gets the palette color of its label.

    Relies on the module-level `colors` palette (0-1 floats, scaled to uchar).
    """
    palette_size = len(colors)
    header = [
        'ply',
        'format ascii 1.0',
        'element vertex %d' % v.shape[0],
        'property float x',
        'property float y',
        'property float z',
        'property uchar red',
        'property uchar green',
        'property uchar blue',
        'end_header',
    ]
    with open(out, 'w') as handle:
        handle.write('\n'.join(header) + '\n')
        for idx in range(v.shape[0]):
            chosen = colors[l[idx] % palette_size]
            handle.write('%f %f %f %d %d %d\n' % (v[idx, 0], v[idx, 1], v[idx, 2],
                                                  int(chosen[0] * 255), int(chosen[1] * 255),
                                                  int(chosen[2] * 255)))
def export_ply_with_normal(out, v, n):
    """Write points with per-vertex normals to an ASCII PLY file.

    Raises AssertionError when v and n have different row counts.
    """
    # Fix: the message previously read 'v.shape[0] != v.shape[0]' (compared v to itself).
    assert v.shape[0] == n.shape[0], 'v.shape[0] != n.shape[0]'
    with open(out, 'w') as fout:
        fout.write('ply\n')
        fout.write('format ascii 1.0\n')
        fout.write('element vertex ' + str(v.shape[0]) + '\n')
        fout.write('property float x\n')
        fout.write('property float y\n')
        fout.write('property float z\n')
        fout.write('property float nx\n')
        fout.write('property float ny\n')
        fout.write('property float nz\n')
        fout.write('end_header\n')
        for i in range(v.shape[0]):
            fout.write('%f %f %f %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2], n[i, 0], n[i, 1], n[i, 2]))
def sample_points_from_obj(label_fn, obj_fn, pts_fn, num_points, verbose=False):
    """Sample `num_points` labeled points from an OBJ via the external MeshSample tool.

    Returns (pts, label): (N, 3) float32 positions and int32 labels parsed from
    the tool's output file; the last field of each row holds a double-quoted label.
    """
    cmd = 'MeshSample -n%d -s3 -l %s %s %s> /dev/null' % (num_points, label_fn, obj_fn, pts_fn)
    if verbose:
        print(cmd)
    call(cmd, shell=True)
    with open(pts_fn, 'r') as handle:
        records = [row.rstrip() for row in handle]
    pts = np.array([rec.split()[:3] for rec in records], dtype=np.float32)
    label = np.array([int(rec.split()[-1].split('"')[1]) for rec in records], dtype=np.int32)
    if verbose:
        print('get pts: ', pts.shape)
    return pts, label
def sample_points(v, f, label=None, num_points=200, verbose=False):
    """Sample points on mesh (v, f) via MeshSample, cleaning up temp files afterwards.

    A zero label per face is used when `label` is not supplied.
    """
    # Unique stem from timestamp + random draw so concurrent calls don't collide.
    stem = str(time.time()).replace('.', '_') + '_' + str(random.random()).replace('.', '_')
    tmp_obj = stem + '.obj'
    tmp_pts = stem + '.pts'
    tmp_label = stem + '.label'
    if label is None:
        label = np.zeros((f.shape[0]), dtype=np.int32)
    export_obj(tmp_obj, v, f)
    export_label(tmp_label, label)
    pts, fid = sample_points_from_obj(tmp_label, tmp_obj, tmp_pts, num_points=num_points, verbose=verbose)
    call('rm -rf %s %s %s' % (tmp_obj, tmp_pts, tmp_label), shell=True)
    return pts, fid
def export_pts_with_color(out, pc, label):
    """Write 'x y z r g b' lines; `label` holds per-point integer RGB triples."""
    count = pc.shape[0]
    with open(out, 'w') as handle:
        for idx in range(count):
            rgb = label[idx]
            handle.write('%f %f %f %d %d %d\n' % (pc[idx, 0], pc[idx, 1], pc[idx, 2],
                                                  rgb[0], rgb[1], rgb[2]))
def export_pts_with_label(out, pc, label, base=0):
    """Write 'x y z r g b' lines, coloring each point by palette entry (label + base).

    Fix: `base` was previously accepted but never applied; with the default
    base=0 the output is unchanged. Relies on the module-level `colors` palette.
    """
    num_point = pc.shape[0]
    num_colors = len(colors)
    with open(out, 'w') as fout:
        for i in range(num_point):
            cur_color = colors[(label[i] + base) % num_colors]
            fout.write('%f %f %f %f %f %f\n' % (pc[i, 0], pc[i, 1], pc[i, 2],
                                                cur_color[0], cur_color[1], cur_color[2]))
def export_pts_with_keypoints(out, pc, kp_list):
    """Write points with a color flag: keypoints red (1,0,0), others blue (0,0,1)."""
    kp_color = [1.0, 0.0, 0.0]
    rest_color = [0.0, 0.0, 1.0]
    with open(out, 'w') as handle:
        for idx in range(pc.shape[0]):
            tint = kp_color if idx in kp_list else rest_color
            handle.write('%f %f %f %f %f %f\n' % (pc[idx, 0], pc[idx, 1], pc[idx, 2],
                                                  tint[0], tint[1], tint[2]))
def compute_boundary_labels(pc, seg, radius=0.05):
    """Mark points whose neighborhood (within `radius`) spans more than one segment.

    Returns an int32 vector: 1 for boundary points, 0 otherwise.
    """
    num_points = len(seg)
    assert num_points == pc.shape[0]
    assert pc.shape[1] == 3
    # Pairwise squared distances via the |a|^2 + |b|^2 - 2 a.b identity,
    # using broadcasting instead of explicit tiling.
    sq = np.sum(pc * pc, axis=1)
    dist = sq[np.newaxis, :] + sq[:, np.newaxis] - 2 * np.dot(pc, pc.T)
    bdr = np.zeros((num_points)).astype(np.int32)
    threshold = radius ** 2
    for idx in range(num_points):
        nearby_segs = seg[dist[idx, :] < threshold]
        if len(set(nearby_segs)) > 1:
            bdr[idx] = 1
    return bdr
def render_obj(out, v, f, delete_img=False, flat_shading=True):
    """Render mesh (v, f) to `out` via the external RenderShape tool.

    Returns the rendered image as a float32 array; the temporary OBJ (and,
    when delete_img is set, the PNG itself) is removed afterwards.
    """
    tmp_obj = out.replace('.png', '.obj')
    export_obj(tmp_obj, v, f)
    shading_flag = '-0 ' if flat_shading else ''
    call('RenderShape %s%s %s 600 600 > /dev/null' % (shading_flag, tmp_obj, out), shell=True)
    img = np.array(imread(out), dtype=np.float32)
    call('rm -rf %s' % (tmp_obj), shell=True)
    if delete_img:
        call('rm -rf %s' % out, shell=True)
    return img
def render_obj_with_label(out, v, f, label, delete_img=False, base=0):
    """Render mesh (v, f) with per-face labels via RenderShape; return float32 image.

    Fix: `label += base` mutated the caller's NumPy array in place; use an
    out-of-place add so the argument is left untouched.
    """
    tmp_obj = out.replace('.png', '.obj')
    tmp_label = out.replace('.png', '.label')
    label = label + base  # copy, do not mutate caller's array
    export_obj(tmp_obj, v, f)
    export_label(tmp_label, label)
    cmd = 'RenderShape %s -l %s %s 600 600 > /dev/null' % (tmp_obj, tmp_label, out)
    call(cmd, shell=True)
    img = np.array(imread(out), dtype=np.float32)
    cmd = 'rm -rf %s %s' % (tmp_obj, tmp_label)
    call(cmd, shell=True)
    if delete_img:
        cmd = 'rm -rf %s' % out
        call(cmd, shell=True)
    return img
def render_pts_with_label(out, pts, label, delete_img=False, base=0, point_size=6):
    """Render a labeled point cloud via RenderShape; return the image as float32.

    Fix: `label += base` mutated the caller's NumPy array in place; use an
    out-of-place add so the argument is left untouched.
    """
    tmp_pts = out.replace('.png', '.pts')
    tmp_label = out.replace('.png', '.label')
    label = label + base  # copy, do not mutate caller's array
    export_pts(tmp_pts, pts)
    export_label(tmp_label, label)
    cmd = 'RenderShape %s -l %s %s 600 600 -p %d > /dev/null' % (tmp_pts, tmp_label, out, point_size)
    call(cmd, shell=True)
    img = np.array(imread(out), dtype=np.float32)
    cmd = 'rm -rf %s %s' % (tmp_pts, tmp_label)
    call(cmd, shell=True)
    if delete_img:
        cmd = 'rm -rf %s' % out
        call(cmd, shell=True)
    return img
def render_pts(out, pts, delete_img=False, point_size=6, point_color='FF0000FF'):
    """Render a point cloud to `out` with the external RenderShape tool."""
    tmp_pts = out.replace('.png', '.pts')
    export_pts(tmp_pts, pts)
    call('RenderShape %s %s 600 600 -p %d -c %s > /dev/null'
         % (tmp_pts, out, point_size, point_color), shell=True)
    img = np.array(imread(out), dtype=np.float32)
    call('rm -rf %s' % tmp_pts, shell=True)
    if delete_img:
        call('rm -rf %s' % out, shell=True)
    return img
def render_pts_with_keypoints(out, pts, kp_list, delete_img=False, \
                              point_size=6, fancy_kp=False, fancy_kp_num=20, fancy_kp_radius=0.02):
    """Render a point cloud with keypoints highlighted (label 13 vs 14) via RenderShape.

    When `fancy_kp` is set, each keypoint gets `fancy_kp_num` extra points drawn
    on a sphere of radius `fancy_kp_radius` around it so it stands out visually.
    """
    tmp_pts = out.replace('.png', '.pts')
    tmp_label = out.replace('.png', '.label')
    num_point = pts.shape[0]
    labels = np.ones((num_point), dtype=np.int32) * 14
    for kp_idx in kp_list:
        labels[kp_idx] = 13
    if fancy_kp:
        num_kp = len(kp_list)
        extra_pts = np.zeros((num_kp * fancy_kp_num, 3), dtype=np.float32)
        extra_labels = np.ones((num_kp * fancy_kp_num), dtype=np.int32) * 13
        for i, kp_idx in enumerate(kp_list):
            for j in range(fancy_kp_num):
                # Draw a Gaussian vector and project it onto the sphere surface.
                x = np.random.randn()
                y = np.random.randn()
                z = np.random.randn()
                norm = np.sqrt(x ** 2 + y ** 2 + z ** 2)
                row = i * fancy_kp_num + j
                extra_pts[row, 0] = x / norm * fancy_kp_radius + pts[kp_idx, 0]
                extra_pts[row, 1] = y / norm * fancy_kp_radius + pts[kp_idx, 1]
                extra_pts[row, 2] = z / norm * fancy_kp_radius + pts[kp_idx, 2]
        pts = np.concatenate((pts, extra_pts), axis=0)
        labels = np.concatenate((labels, extra_labels), axis=0)
    export_pts(tmp_pts, pts)
    export_label(tmp_label, labels)
    call('RenderShape %s -l %s %s 600 600 -p %d > /dev/null'
         % (tmp_pts, tmp_label, out, point_size), shell=True)
    img = np.array(imread(out), dtype=np.float32)
    call('rm -rf %s %s' % (tmp_pts, tmp_label), shell=True)
    if delete_img:
        call('rm -rf %s' % out, shell=True)
    return img
def compute_normal(pts, neighbor=50):
    """Estimate unit normals by plane-fitting each point's `neighbor` nearest points.

    Normals are flipped to point away from the origin (dot(normal, point) >= 0).
    """
    count = pts.shape[0]
    assert (count > neighbor)
    # Pairwise squared distances, then take the `neighbor` closest per point.
    sq = np.sum(pts ** 2, axis=1)
    A = np.tile(sq, (count, 1))
    dist = A - 2 * np.dot(pts, pts.T) + np.array(A).T
    neigh_ids = dist.argsort(axis=1)[:, :neighbor]
    ones_col = np.ones((neighbor, 1)).astype(np.float32)
    normals = np.zeros((count, 3)).astype(np.float32)
    for row in range(count):
        # Least-squares plane through the neighborhood: solve D * n = 1.
        neighborhood = pts[neigh_ids[row, :], :]
        fitted = np.squeeze(np.dot(np.linalg.pinv(neighborhood), ones_col))
        fitted = fitted / np.sqrt(np.sum(fitted ** 2))
        if np.dot(fitted, pts[row, :]) < 0:
            fitted = -fitted
        normals[row, :] = fitted
    return normals
def transfer_label_from_pts_to_obj(vertices, faces, pts, label):
    """Assign each face the label of the sampled point nearest to its centroid.

    `faces` is 1-indexed (OBJ convention); returns one label per face.
    """
    assert pts.shape[0] == label.shape[0], 'ERROR: #pts != #label'
    # Face centroids, computed in one vectorized pass.
    face_center_array = (vertices[faces[:, 0] - 1, :]
                         + vertices[faces[:, 1] - 1, :]
                         + vertices[faces[:, 2] - 1, :]) / 3
    # Squared distances point -> centroid via |a|^2 + |b|^2 - 2 a.b.
    sq_pts = np.sum(pts ** 2, axis=1)
    sq_centers = np.sum(face_center_array ** 2, axis=1)
    dist = sq_centers[np.newaxis, :] + sq_pts[:, np.newaxis] - 2 * np.dot(pts, face_center_array.T)
    nearest_pt = np.argmin(dist, axis=0)
    return label[nearest_pt]
def detect_connected_component(vertices, faces, face_labels=None):
    """Assign a connected-component id to every face of a triangle mesh.

    Two faces are connected when they share an edge; when `face_labels` is
    given, connectivity additionally requires both faces to carry the same
    label. `faces` is 1-indexed (OBJ convention). Returns an int32 array of
    length num_faces with each face's component id.

    Fixes: `face_labels == None` (elementwise comparison when face_labels is a
    NumPy array) replaced with `is None`; deprecated `np.bool` replaced with
    the builtin `bool`.
    """
    edge2facelist = dict()
    num_faces = faces.shape[0]
    bar = progressbar.ProgressBar()
    face_id_list = []
    # Build edge -> incident-face lists, keyed by the sorted vertex pair so
    # both faces of a shared edge produce the same key.
    for face_id in bar(range(num_faces)):
        ids = np.sort(faces[face_id, :3] - 1)
        keys = ['%d_%d' % (ids[0], ids[1]),
                '%d_%d' % (ids[1], ids[2]),
                '%d_%d' % (ids[0], ids[2])]
        for key in keys:
            edge2facelist.setdefault(key, []).append(face_id)
        face_id_list.append(keys)
    # BFS flood-fill over shared edges.
    face_used = np.zeros((num_faces), dtype=bool)
    face_seg_id = np.zeros((num_faces), dtype=np.int32)
    cur_id = 0
    new_part = False
    for seed_id in range(num_faces):
        q = deque()
        q.append(seed_id)
        while len(q) > 0:
            face_id = q.popleft()
            if not face_used[face_id]:
                face_used[face_id] = True
                new_part = True
                face_seg_id[face_id] = cur_id
                for key in face_id_list[face_id]:
                    for new_face_id in edge2facelist[key]:
                        if not face_used[new_face_id] and (face_labels is None or
                                                           face_labels[new_face_id] == face_labels[face_id]):
                            q.append(new_face_id)
        if new_part:
            cur_id += 1
            new_part = False
    return face_seg_id
def calculate_two_pts_distance(pts1, pts2):
    """Pairwise squared Euclidean distances; returns an (n1, n2) matrix."""
    sq1 = np.sum(pts1 ** 2, axis=1)[:, np.newaxis]
    sq2 = np.sum(pts2 ** 2, axis=1)[np.newaxis, :]
    return sq1 + sq2 - 2 * np.dot(pts1, pts2.T)
def propagate_pts_seg_from_another_pts(ori_pts, ori_seg, tar_pts):
    """Give each target point the segment label of its nearest source point."""
    pairwise = calculate_two_pts_distance(ori_pts, tar_pts)
    nearest_src = np.argmin(pairwise, axis=0)
    return ori_seg[nearest_src]
# ----------------------------------------
# Testing
# ----------------------------------------
if __name__ == '__main__':
    # Ad-hoc visual smoke tests. NOTE(review): write_lines_as_cylinders,
    # write_oriented_bbox and point_cloud_to_bbox are defined elsewhere in
    # this module -- confirm they are in scope before running.
    print('running some tests')
    ############
    ## Test "write_lines_as_cylinders"
    ############
    pcl = np.random.rand(32, 2, 3)
    write_lines_as_cylinders(pcl, 'point_connectors')
    input()  # pause so the written output can be inspected manually
    scene_bbox = np.zeros((1, 7))
    scene_bbox[0, 3:6] = np.array([1, 2, 3])  # dx,dy,dz
    scene_bbox[0, 6] = np.pi / 4  # 45 degrees
    write_oriented_bbox(scene_bbox, 'single_obb_45degree.ply')
    ############
    ## Test point_cloud_to_bbox
    ############
    pcl = np.random.rand(32, 16, 3)
'''
25 2D-Gaussian Simulation
Compare different Sampling methods and DRE methods
1. DRE method
1.1. By NN
DR models: MLP
Loss functions: uLSIF, DSKL, BARR, SP (ours)
lambda for SP is selected by maximizing average density ratio on validation set
1.2. GAN property
2. Data Generation:
(1). Target Distribution p_r: A mixture of 25 2-D Gaussian
25 means which are combinations of any two points in [-2, -1, 0, 1, 2]
Each Gaussian has a covariance matrix sigma*diag(2), sigma=0.05
(2). Proposal Distribution p_g: GAN
NTRAIN = 50000, NVALID=50000, NTEST=10000
3. Sampling from GAN by None/RS/MH/SIR
4. Evaluation on a held-out test set
Prop. of good samples (within 4 sd) and Prop. of recovered modes
'''
# Working directory for all experiment outputs; chdir so the relative
# output paths created below resolve under ./Simulation.
wd = './Simulation'
import os
os.chdir(wd)
import timeit
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.nn import functional as F
import random
import matplotlib.pyplot as plt
import matplotlib as mpl
from torch import autograd
from torchvision.utils import save_image
from tqdm import tqdm
import gc
from itertools import groupby
import argparse
from sklearn.linear_model import LogisticRegression
import multiprocessing
from scipy.stats import multivariate_normal
from scipy.stats import ks_2samp
import pickle
import csv
from sklearn.model_selection import GridSearchCV
from sklearn import mixture
from utils import *
from models import *
from Train_DRE import train_DRE_GAN
from Train_GAN import *
#######################################################################################
''' Settings '''
#######################################################################################
# Command-line configuration for the whole experiment (repetitions, GAN and
# DRE hyper-parameters, sampling method, GMM density-estimation settings).
parser = argparse.ArgumentParser(description='Simulation')
'''Overall Settings'''
parser.add_argument('--NSIM', type=int, default=1,
    help = "How many times does this experiment need to be repeated?")
parser.add_argument('--DIM', type=int, default=2,
    help = "Dimension of the Euclidean space of our interest")
parser.add_argument('--n_comp_tar', type=int, default=25,
    help = "Number of mixture components in the target distribution")
parser.add_argument('--DRE', type=str, default='DRE_SP',
    choices=['None', 'GT', 'DRE_uLSIF', 'DRE_DSKL', 'DRE_BARR', 'DRE_SP', 'disc', 'disc_MHcal', 'disc_KeepTrain'], #GT is ground truth
    help='Density ratio estimation method; None means randomly sample from the proposal distribution or the trained GAN')
parser.add_argument('--Sampling', type=str, default='RS',
    help='Sampling method; Candidiate: None, RS, MH, SIR') #RS: rejection sampling, MH: Metropolis-Hastings; SIR: Sampling-Importance Resampling
parser.add_argument('--seed', type=int, default=2019, metavar='S',
    help='random seed (default: 2019)')
parser.add_argument('--show_reference', action='store_true', default=False,
    help='Assign 1 as density ratios to all samples and compute errors')
parser.add_argument('--show_visualization', action='store_true', default=False,
    help='Plot fake samples in 2D coordinate')
''' Data Generation '''
parser.add_argument('--NTRAIN', type=int, default=50000)
parser.add_argument('--NTEST', type=int, default=10000)
''' GAN settings '''
parser.add_argument('--epoch_gan', type=int, default=50) #default 50
parser.add_argument('--lr_gan', type=float, default=1e-3,
    help='learning rate')
parser.add_argument('--dim_gan', type=int, default=2,
    help='Latent dimension of GAN')
parser.add_argument('--batch_size_gan', type=int, default=512, metavar='N',
    help='input batch size for training GAN')
parser.add_argument('--resumeTrain_gan', type=int, default=0)
parser.add_argument('--compute_disc_err', action='store_true', default=False,
    help='Compute the distance between the discriminator and its optimality')
'''DRE Settings'''
parser.add_argument('--DR_Net', type=str, default='MLP5',
    choices=['MLP3', 'MLP5', 'MLP7', 'MLP9',
             'CNN5'],
    help='DR Model') # DR models
parser.add_argument('--epoch_DRE', type=int, default=200)
parser.add_argument('--base_lr_DRE', type=float, default=1e-5,
    help='learning rate')
parser.add_argument('--decay_lr_DRE', action='store_true', default=False,
    help='decay learning rate')
parser.add_argument('--lr_decay_epochs_DRE', type=int, default=400)
parser.add_argument('--batch_size_DRE', type=int, default=512, metavar='N',
    help='input batch size for training DRE')
parser.add_argument('--lambda_DRE', type=float, default=0.0, #BARR: lambda=10
    help='penalty in DRE')
parser.add_argument('--weightdecay_DRE', type=float, default=0.0,
    help='weight decay in DRE')
parser.add_argument('--resumeTrain_DRE', type=int, default=0)
parser.add_argument('--DR_final_ActFn', type=str, default='ReLU',
    help='Final layer of the Density-ratio model; Candidiate: Softplus or ReLU')
parser.add_argument('--TrainPreNetDRE', action='store_true', default=False,
    help='Pre-trained MLP for DRE in Feature Space')
parser.add_argument('--DRE_save_at_epoch', nargs='+', type=int)
parser.add_argument('--epoch_KeepTrain', type=int, default=20)
parser.add_argument('--compute_dre_err', action='store_true', default=False,
    help='Compare the DRE method with the ground truth')
''' Mixture Gaussian (for density estimation) Settings '''
parser.add_argument('--gmm_nfake', type=int, default=100000)
# parser.add_argument('--gmm_ncomp', type=int, default=0) #gmm_ncomp is non-positive, then we do ncomp selection
parser.add_argument('--gmm_ncomp_nsim', nargs='+', type=int) #A list of ncomp for NSIM rounds. If gmm_ncomp is None, then we do ncomp selection
parser.add_argument('--gmm_ncomp_grid', nargs='+', type=int)
parser.add_argument('--gmm_ncomp_grid_lb', type=int, default=1)
parser.add_argument('--gmm_ncomp_grid_ub', type=int, default=100)
parser.add_argument('--gmm_ncomp_grid_step', type=int, default=1)
args = parser.parse_args()
#--------------------------------
# system
assert torch.cuda.is_available()  # this experiment requires a CUDA device
NGPU = torch.cuda.device_count()
device = torch.device("cuda" if NGPU>0 else "cpu")
cores= multiprocessing.cpu_count()
#--------------------------------
# Extra Data Generation Settings
n_comp_tar = args.n_comp_tar
n_features = args.DIM
# Component means are all (i, j) pairs from this grid -> 5*5 = 25 modes.
mean_grid_tar = [-2, -1, 0, 1, 2]
sigma_tar = 0.05
n_classes = n_comp_tar
quality_threshold = sigma_tar*4 #good samples are within 4 standard deviations of a mode
#--------------------------------
# GAN Settings
epoch_GAN = args.epoch_gan
lr_GAN = args.lr_gan
batch_size_GAN = args.batch_size_gan
dim_GAN = args.dim_gan
plot_in_train = True
gan_Adam_beta1 = 0.5
gan_Adam_beta2 = 0.999
#--------------------------------
# Extra DRE Settings
DRE_Adam_beta1 = 0.5
DRE_Adam_beta2 = 0.999
comp_ratio_bs = 1000 #batch size for computing density ratios
base_lr_PreNetDRE = 1e-1
epoch_PreNetDRE = 100
DRE_save_at_epoch = args.DRE_save_at_epoch # save checkpoints at these epochs
# DRE_save_at_epoch = [20, 50, 100, 150, 200, 300, 400, 500, 800]
epoch_KeepTrain = args.epoch_KeepTrain #keep training for DRS
ckp_epoch_KeepTrain = [i for i in range(100) if i%5==0]
#--------------------------------
# Mixture Gaussian Setting
gmm_nfake = args.gmm_nfake
# gmm_ncomp = args.gmm_ncomp
gmm_ncomp_nsim = args.gmm_ncomp_nsim
# if gmm_ncomp_nsim is not None:
#     assert len(gmm_ncomp_nsim) == args.NSIM
if args.gmm_ncomp_grid is not None:
    gmm_ncomp_grid = args.gmm_ncomp_grid
else:
    # Fall back to an evenly spaced search grid for the GMM component count.
    gmm_ncomp_grid = np.arange(args.gmm_ncomp_grid_lb, args.gmm_ncomp_grid_ub+args.gmm_ncomp_grid_step, args.gmm_ncomp_grid_step)
#--------------------------------
# Extra Sampling Settings
NFAKE = args.NTEST
samp_batch_size = 10000
MH_K = 100
MH_mute = True #do not print sampling progress
#-------------------------------
# seeds
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
np.random.seed(args.seed)
#-------------------------------
# output folders
save_models_folder = wd + '/Output/saved_models/'
os.makedirs(save_models_folder,exist_ok=True)
save_images_folder = wd + '/Output/saved_images/'
os.makedirs(save_images_folder,exist_ok=True)
save_traincurves_folder = wd + '/Output/Training_loss_fig/'
os.makedirs(save_traincurves_folder,exist_ok=True)
save_GANimages_InTrain_folder = wd + '/Output/saved_images/GAN_InTrain'
os.makedirs(save_GANimages_InTrain_folder,exist_ok=True)
save_objects_folder = wd + '/Output/saved_objects'
os.makedirs(save_objects_folder,exist_ok=True)
#######################################################################################
''' Start Experiment '''
#######################################################################################
#---------------------------------
# sampler for reference distribution
# Build the 25 mixture means as the Cartesian product of the grid with itself.
means_tar = np.zeros((1,n_features))
for i in mean_grid_tar:
    for j in mean_grid_tar:
        means_tar = np.concatenate((means_tar, np.array([i,j]).reshape(-1,n_features)), axis=0)
means_tar = means_tar[1:]  # drop the placeholder first row
assert means_tar.shape[0] == n_comp_tar
assert means_tar.shape[1] == n_features
def generate_data_tar(nsamp):
    # Draw nsamp samples from the 25-component target mixture (utils.sampler_MixGaussian).
    return sampler_MixGaussian(nsamp, means_tar, sigma = sigma_tar, dim = n_features)
def p_r(samples): #pdf of the underlying distribution; samples is a n by n_features sample matrix
    return pdf_MixGaussian(samples, means_tar, sigma_tar)
# Per-round result accumulators.
prop_recovered_modes = np.zeros(args.NSIM) # num of recovered modes divided by num of modes
prop_good_samples = np.zeros(args.NSIM) # num of good fake samples divided by num of all fake samples
valid_densityratios_all = [] #store density ratios for validation samples
train_densityratios_all = []
ks_test_results = np.zeros((args.NSIM,2))
dre_errors_all = np.zeros(args.NSIM) #compute density ratios on the test set (hold-out set) with each DRE method and the ground truth
dre_errors_hq = np.zeros(args.NSIM)
dre_errors_lq = np.zeros(args.NSIM)
esti_avg_densityratio = np.zeros((args.NSIM, 4)) #estimated density ratios of testing samples, NFAKE fake samples, HQ fake samples, LQ fake samples
true_avg_densityratio = np.zeros((args.NSIM, 4)) #true density ratios of testing samples, NFAKE fake samples, HQ fake samples, LQ fake samples
disc_errors_all = np.zeros(args.NSIM) #compute the distance between the discriminator and its optimality
nfake_in_train = np.zeros(args.NSIM)
print("\n Begin The Experiment. Sample from a GAN! >>>")
start = timeit.default_timer()
for nSim in range(args.NSIM):
print("Round %s" % (nSim))
np.random.seed(nSim) #set seed for current simulation
###############################################################################
# Data generation and dataloaders
###############################################################################
train_samples_tar, train_labels_tar = generate_data_tar(args.NTRAIN)
valid_samples_tar, valid_labels_tar = generate_data_tar(args.NTRAIN)
test_samples_tar, test_labels_tar = generate_data_tar(args.NTEST)
train_dataset_tar = custom_dataset(train_samples_tar, train_labels_tar)
test_dataset_tar = custom_dataset(test_samples_tar, test_labels_tar)
train_dataloader_tar = torch.utils.data.DataLoader(train_dataset_tar, batch_size=args.batch_size_DRE, shuffle=True, num_workers=0)
test_dataloader_tar = torch.utils.data.DataLoader(test_dataset_tar, batch_size=100, shuffle=False, num_workers=0)
# #compute the criterion for determing good smaples through train_samples_tar
# # for each mixture component, compute the average distance of real samples from this component to the mean
# l2_dis_train_samples = np.zeros(args.NTRAIN) #l2 distance between a fake sample and a mode
# for i in range(args.NTRAIN):
# indx_mean = int(train_labels_tar[i])
# l2_dis_train_samples[i] = np.sqrt(np.sum((train_samples_tar[i]-means_tar[indx_mean])**2))
# print(l2_dis_train_samples.max())
###############################################################################
# Train a GAN model
###############################################################################
Filename_GAN = save_models_folder + '/ckpt_GAN_epoch_' + str(args.epoch_gan) + '_SEED_' + str(args.seed) + '_nSim_' + str(nSim)
print("\n Begin Training GAN:")
#model initialization
netG = generator(ngpu=NGPU, nz=dim_GAN, out_dim=n_features)
netD = discriminator(ngpu=NGPU, input_dim = n_features)
if not os.path.isfile(Filename_GAN):
criterion = nn.BCELoss()
optimizerG = torch.optim.Adam(netG.parameters(), lr=lr_GAN, betas=(gan_Adam_beta1, gan_Adam_beta2))
optimizerD = torch.optim.Adam(netD.parameters(), lr=lr_GAN, betas=(gan_Adam_beta1, gan_Adam_beta2))
# Start training
netG, netD, optimizerG, optimizerD = train_GAN(epoch_GAN, dim_GAN, train_dataloader_tar, netG, netD, optimizerG, optimizerD, criterion, save_models_folder = save_models_folder, ResumeEpoch = args.resumeTrain_gan, device=device, plot_in_train=plot_in_train, save_images_folder = save_GANimages_InTrain_folder, samples_tar = test_samples_tar)
# store model
torch.save({
'netG_state_dict': netG.state_dict(),
'netD_state_dict': netD.state_dict(),
}, Filename_GAN)
torch.cuda.empty_cache()
else: #load pre-trained GAN
print("\n GAN exists! Loading Pretrained Model>>>")
checkpoint = torch.load(Filename_GAN)
netG.load_state_dict(checkpoint['netG_state_dict'])
netD.load_state_dict(checkpoint['netD_state_dict'])
netG = netG.to(device)
netD = netD.to(device)
def fn_sampleGAN(nfake, batch_size=1000):
return SampGAN(netG, GAN_Latent_Length = args.dim_gan, NFAKE = nfake, batch_size = batch_size, device=device)
###############################################################################
# Construct a function to compute density-ratio
###############################################################################
# Approximate DR by NN
if args.DRE in ['DRE_uLSIF', 'DRE_DSKL', 'DRE_BARR', 'DRE_SP']:
# TRAIN DRE
DRE_loss_type = args.DRE[4:] #loss type
if args.DR_Net in ['MLP3', 'MLP5', 'MLP7', 'MLP9']:
netDR = DR_MLP(args.DR_Net, init_in_dim = n_features, ngpu=NGPU, final_ActFn=args.DR_final_ActFn)
elif args.DR_Net in ['CNN5']:
netDR = DR_CNN(args.DR_Net, init_in_dim = n_features, ngpu=NGPU, final_ActFn=args.DR_final_ActFn)
optimizer = torch.optim.Adam(netDR.parameters(), lr = args.base_lr_DRE, betas=(DRE_Adam_beta1, DRE_Adam_beta2), weight_decay=args.weightdecay_DRE)
#optimizer = torch.optim.RMSprop(netDR.parameters(), lr= args.base_lr_DRE, alpha=0.99, eps=1e-08, weight_decay=args.weightdecay_DRE, momentum=0.9, centered=False)
Filename_DRE = save_models_folder + '/ckpt_' + args.DRE +'_LAMBDA_' + str(args.lambda_DRE) + '_FinalActFn_' + args.DR_final_ActFn + '_epoch_' + str(args.epoch_DRE) \
+ "_PreNetDRE_" + str(args.TrainPreNetDRE) + '_SEED_' + str(args.seed) + '_nSim_' + str(nSim) + '_epochGAN_' + str(epoch_GAN)
filename0 = save_traincurves_folder + '/TrainCurve_' + args.DRE +'_LAMBDA_' + str(args.lambda_DRE) + '_FinalActFn_' + args.DR_final_ActFn + '_epoch_' \
+ str(args.epoch_DRE) + "_PreNetDRE_" + str(args.TrainPreNetDRE) + '_SEED_' + str(args.seed) + "_nSim_" + str(nSim) + '_epochGAN_' + str(epoch_GAN) + "_TrainLoss"
plot_filename = filename0 + '.pdf'
npy_filename = filename0 + '.npy'
# Train a net to extract features for DR net
if args.TrainPreNetDRE:
print("\n Begin Training PreNetDRE Net:")
Filename_PreNetDRE = save_models_folder + '/ckpt_PreNetDRE_epochPreNetDRE_' + str(epoch_PreNetDRE) + '_SEED_' + str(args.seed) + '_nSim_' + str(nSim) + '_epochGAN_' + str(epoch_GAN)
PreNetDRE_MLP = PreNetDRE_MLP(init_in_dim = n_features, ngpu=NGPU)
if not os.path.isfile(Filename_PreNetDRE):
criterion_PreNetDRE = nn.CrossEntropyLoss()
optimizer_PreNetDRE = torch.optim.SGD(PreNetDRE_MLP.parameters(), lr = base_lr_PreNetDRE, momentum= 0.9, weight_decay=1e-4)
PreNetDRE_MLP, _ = train_PreNetDRE(epoch_PreNetDRE, train_dataloader_tar, test_dataloader_tar, PreNetDRE_MLP, base_lr_PreNetDRE, optimizer_PreNetDRE, criterion_PreNetDRE, device=device)
# save model
torch.save({
'net_state_dict': PreNetDRE_MLP.state_dict(),
}, Filename_PreNetDRE)
else:
print("\n PreNetDRE Net exits and start loading:")
checkpoint_PreNetDRE_MLP = torch.load(Filename_PreNetDRE)
PreNetDRE_MLP.load_state_dict(checkpoint_PreNetDRE_MLP['net_state_dict'])
PreNetDRE_MLP = PreNetDRE_MLP.to(device)
def extract_features(samples):
#samples: an numpy array
n_samples = samples.shape[0]
batch_size_tmp = 1000
dataset_tmp = custom_dataset(samples)
dataloader_tmp = torch.utils.data.DataLoader(dataset_tmp, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
data_iter = iter(dataloader_tmp)
extracted_features = np.zeros((n_samples+batch_size_tmp, n_features))
PreNetDRE_MLP.eval()
with torch.no_grad():
tmp = 0
while tmp < n_samples:
batch_samples,_ = data_iter.next()
batch_samples = batch_samples.type(torch.float).to(device)
_, batch_features = PreNetDRE_MLP(batch_samples)
extracted_features[tmp:(tmp+batch_size_tmp)] = batch_features.cpu().detach().numpy()
tmp += batch_size_tmp
#end while
return extracted_features[0:n_samples]
test_features_tar = extract_features(test_samples_tar)
plt.switch_backend('agg')
mpl.style.use('seaborn')
plt.figure()
plt.grid(b=True)
flag0 = 0; flag1=0
colors = ['b','g','r','c','m','y','k']
marker_styles = ['.', 'o', 'v', 's']
for nc in range(n_classes):
indx = np.where(test_labels_tar == nc)[0]
plt.scatter(test_features_tar[indx, 0], test_features_tar[indx, 1], c=colors[flag0], marker=marker_styles[flag1], s=8)
flag0 += 1
if flag0 % 7 ==0 :
flag0 = 0; flag1+=1
filename0 = save_images_folder + '/test.pdf'
plt.savefig(filename0)
plt.close()
if not os.path.isfile(Filename_DRE):
# Train
print("\n Begin Training DRE NET:")
if args.TrainPreNetDRE:
netDR, optimizer, avg_train_loss = train_DRE_GAN(net=netDR, optimizer=optimizer, BASE_LR_DRE=args.base_lr_DRE, EPOCHS_DRE=args.epoch_DRE, LAMBDA=args.lambda_DRE, tar_dataloader=train_dataloader_tar, netG=netG, dim_gan=dim_GAN, PreNetDRE = PreNetDRE_MLP, decay_lr=args.decay_lr_DRE, decay_epochs=args.lr_decay_epochs_DRE, loss_type=DRE_loss_type, save_models_folder = save_models_folder, ResumeEpoch=args.resumeTrain_DRE, NGPU=NGPU, device=device, save_at_epoch = DRE_save_at_epoch, current_nsim=nSim)
else:
netDR, optimizer, avg_train_loss = train_DRE_GAN(net=netDR, optimizer=optimizer, BASE_LR_DRE=args.base_lr_DRE, EPOCHS_DRE=args.epoch_DRE, LAMBDA=args.lambda_DRE, tar_dataloader=train_dataloader_tar, netG=netG, dim_gan=dim_GAN, decay_lr=args.decay_lr_DRE, decay_epochs=args.lr_decay_epochs_DRE, loss_type=DRE_loss_type, save_models_folder = save_models_folder, ResumeEpoch=args.resumeTrain_DRE, NGPU=NGPU, device=device, save_at_epoch = DRE_save_at_epoch, current_nsim=nSim)
# Plot loss
PlotLoss(avg_train_loss, plot_filename)
np.save(npy_filename, np.array(avg_train_loss))
# save model
torch.save({
'net_state_dict': netDR.state_dict(),
}, Filename_DRE)
else: #if the DR model is already trained, load the checkpoint
print("\n DRE NET exists and start loading:")
checkpoint_netDR = torch.load(Filename_DRE)
netDR.load_state_dict(checkpoint_netDR['net_state_dict'])
netDR = netDR.to(device)
def comp_density_ratio(samples, verbose=False):
#samples: an numpy array
n_samples = samples.shape[0]
batch_size_tmp = 1000
dataset_tmp = custom_dataset(samples)
dataloader_tmp = torch.utils.data.DataLoader(dataset_tmp, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
data_iter = iter(dataloader_tmp)
density_ratios = np.zeros((n_samples+batch_size_tmp, 1))
netDR.eval()
if args.TrainPreNetDRE:
PreNetDRE_MLP.eval()
with torch.no_grad():
tmp = 0
while tmp < n_samples:
batch_samples,_ = data_iter.next()
batch_samples = batch_samples.type(torch.float).to(device)
if args.TrainPreNetDRE:
_, batch_features = PreNetDRE_MLP(batch_samples)
batch_weights = netDR(batch_features)
else:
batch_weights = netDR(batch_samples)
#density_ratios[tmp:(tmp+batch_size_tmp)] = batch_weights.cpu().detach().numpy()
density_ratios[tmp:(tmp+batch_size_tmp)] = batch_weights.cpu().numpy()
tmp += batch_size_tmp
if verbose:
print(batch_weights.cpu().numpy().mean())
#end while
return density_ratios[0:n_samples]+1e-14
###################
# DRE based on GAN property
elif args.DRE in ['disc', 'disc_MHcal', 'disc_KeepTrain']:
if args.DRE == 'disc': #use GAN property to compute density ratio; ratio=D/(1-D);
# function for computing a bunch of images
# def comp_density_ratio(samples, netD):
def comp_density_ratio(samples):
    """Estimate density ratios via the GAN identity ratio = D / (1 - D).

    Uses the trained discriminator ``netD`` (closure variable) on batches of
    ``samples`` (a numpy array, first axis indexes samples).
    Returns an ``(n_samples, 1)`` numpy array of density ratios.
    """
    n_samples = samples.shape[0]
    batch_size_tmp = 1000
    dataset_tmp = custom_dataset(samples)
    dataloader_tmp = torch.utils.data.DataLoader(dataset_tmp, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
    data_iter = iter(dataloader_tmp)
    # Over-allocate by one batch so the last (possibly partial) batch can be
    # written without bounds checking; trimmed before returning.
    density_ratios = np.zeros((n_samples+batch_size_tmp, 1))
    netD.eval()
    with torch.no_grad():
        tmp = 0
        while tmp < n_samples:
            # FIX: use the builtin next(); iterator.next() is a Python-2-ism
            # that raises AttributeError on Python 3 DataLoader iterators.
            batch_samples, _ = next(data_iter)
            batch_samples = batch_samples.type(torch.float).to(device)
            disc_probs = netD(batch_samples).cpu().detach().numpy()
            # FIX: the np.float alias was removed in NumPy 1.24 -- use the
            # builtin float. Clip away 0/1 so the division below is safe.
            disc_probs = np.clip(disc_probs.astype(float), 1e-14, 1 - 1e-14)
            density_ratios[tmp:(tmp+batch_size_tmp)] = np.divide(disc_probs, 1-disc_probs)
            tmp += batch_size_tmp
        #end while
    return density_ratios[0:n_samples]
#-----------------------------------
elif args.DRE == 'disc_MHcal': #use the calibration method in MH-GAN to calibrate disc
n_test = valid_samples_tar.shape[0]
batch_size_tmp = 1000
cal_labels_fake = np.zeros((n_test,1))
cal_labels_real = np.ones((n_test,1))
cal_samples_fake = fn_sampleGAN(nfake=n_test, batch_size=batch_size_tmp)
dataset_fake = custom_dataset(cal_samples_fake)
dataloader_fake = torch.utils.data.DataLoader(dataset_fake, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
dataset_real = custom_dataset(valid_samples_tar)
dataloader_real = torch.utils.data.DataLoader(dataset_real, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
del cal_samples_fake; gc.collect()
# get the output of disc before the final sigmoid layer; the \tilde{D} in Eq.(4) in "Discriminator Rejection Sampling"
# def comp_disc_scores(samples_dataloader, netD):
def comp_disc_scores(samples_dataloader):
    """Return pre-sigmoid discriminator scores log(D/(1-D)).

    This is the tilde-D quantity in Eq.(4) of "Discriminator Rejection
    Sampling", computed for every sample served by the data loader.

    samples_dataloader: DataLoader over the samples/images.
    Returns an ``(n_samples, 1)`` numpy array.
    """
    n_samples = len(samples_dataloader.dataset)
    data_iter = iter(samples_dataloader)
    batch_size_tmp = samples_dataloader.batch_size
    # Over-allocate by one batch; trimmed before returning.
    disc_scores = np.zeros((n_samples+batch_size_tmp, 1))
    netD.eval()
    with torch.no_grad():
        tmp = 0
        while tmp < n_samples:
            # FIX: iterator.next() was removed in Python 3 -- use next().
            batch_samples, _ = next(data_iter)
            batch_samples = batch_samples.type(torch.float).to(device)
            disc_probs = netD(batch_samples).cpu().detach().numpy()
            # FIX: np.float alias removed in NumPy 1.24 -- use builtin float.
            # Clipping keeps the log/division finite.
            disc_probs = np.clip(disc_probs.astype(float), 1e-14, 1 - 1e-14)
            disc_scores[tmp:(tmp+batch_size_tmp)] = np.log(np.divide(disc_probs, 1-disc_probs))
            tmp += batch_size_tmp
        #end while
    return disc_scores[0:n_samples]
cal_disc_scores_fake = comp_disc_scores(dataloader_fake) #discriminator scores for fake images
cal_disc_scores_real = comp_disc_scores(dataloader_real) #discriminator scores for real images
# Train a logistic regression model
X_train = np.concatenate((cal_disc_scores_fake, cal_disc_scores_real),axis=0).reshape(-1,1)
y_train = np.concatenate((cal_labels_fake, cal_labels_real), axis=0).reshape(-1)
#del cal_disc_scores_fake, cal_disc_scores_real; gc.collect()
cal_logReg = LogisticRegression(solver="liblinear").fit(X_train, y_train)
# function for computing a bunch of images
# def comp_density_ratio(samples, netD):
def comp_density_ratio(samples):
    """Estimate density ratios from a calibrated discriminator.

    Feeds pre-sigmoid discriminator scores through the logistic-regression
    calibrator ``cal_logReg`` (MH-GAN style), then forms ratio = p/(1-p).

    samples: a numpy array of samples.
    Returns an ``(n_samples, 1)`` numpy array.
    """
    dataset_tmp = custom_dataset(samples)
    dataloader_tmp = torch.utils.data.DataLoader(dataset_tmp, batch_size=batch_size_tmp, shuffle=False, num_workers=0)
    disc_scores = comp_disc_scores(dataloader_tmp)
    # Second column of predict_proba corresponds to the "real" class.
    disc_probs = (cal_logReg.predict_proba(disc_scores))[:,1]
    # FIX: np.float alias removed in NumPy 1.24 -- use builtin float.
    disc_probs = np.clip(disc_probs.astype(float), 1e-14, 1 - 1e-14)
    density_ratios = np.divide(disc_probs, 1-disc_probs)
    return density_ratios.reshape(-1,1)
#---------------------------------------------
# disc_KeepTrain
elif args.DRE == "disc_KeepTrain":
batch_size_KeepTrain = 256
Filename_KeepTrain_Disc = save_models_folder + '/ckpt_KeepTrainDisc_epoch_'+str(epoch_KeepTrain)+ '_SEED_' + str(args.seed) + '_nSim_' + str(nSim) + '_epochGAN_' + str(epoch_GAN)
if not os.path.isfile(Filename_KeepTrain_Disc):
print("Resume training Discriminator for %d epochs" % epoch_KeepTrain)
# keep train the discriminator
n_heldout = valid_samples_tar.data.shape[0]
batch_size_tmp = 500
cal_labels = np.concatenate(( | np.zeros((n_heldout,1)) | numpy.zeros |
import csv
import utils
import numpy as np
from domain import Domain
import random
from sklearn import svm
from sklearn.metrics import accuracy_score
import json
import pandas as pd
from main import main
import time
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from preprocess import read_preprocessed_data
from sklearn import preprocessing
import os
import multiprocessing as mp
# shuffle and split data
def split(data_name):
    """Shuffle the preprocessed dataset and write an 80/20 train/test split.

    Reads ``./preprocess/<data_name>.csv`` and writes
    ``./exp_data/<data_name>_train.csv`` and ``..._test.csv``.
    """
    print(' split data')
    data, headings = utils.tools.read_csv('./preprocess/'+data_name+'.csv')
    data = np.array(data, dtype=int)
    np.random.shuffle(data)
    path = './exp_data'
    # FIX: makedirs(..., exist_ok=True) avoids the check-then-create race of
    # os.path.exists + os.mkdir and also creates missing parents.
    os.makedirs(path, exist_ok=True)
    # 80% train / 20% test split of the shuffled rows.
    n_train = int(0.8*len(data))
    utils.tools.write_csv(data[:n_train], headings, './exp_data/'+data_name+'_train.csv')
    utils.tools.write_csv(data[n_train:], headings, './exp_data/'+data_name+'_test.csv')
# evaluate dp data on k way marginal task
def k_way_marginal(data_name, dp_data_list, k, marginal_num):
# data, headings = utils.tools.read_csv('./exp_data/' + data_name + '_train.csv')
data, headings = utils.tools.read_csv('./preprocess/' + data_name + '.csv', print_info=False)
data = np.array(data, dtype=int)
attr_num = data.shape[1]
data_num = data.shape[0]
# attr_num = 10
domain = json.load(open('./preprocess/'+data_name+'.json'))
domain = {int(key): domain[key] for key in domain}
domain = Domain(domain, list(range(attr_num)))
marginal_list = [tuple(sorted(list(np.random.choice(attr_num, k, replace=False)))) for i in range(marginal_num)]
marginal_dict = {}
size_limit = 1e8
for marginal in marginal_list:
temp_domain = domain.project(marginal)
if temp_domain.size() < size_limit:
# It is fast when domain is small, howerver it will allocate very large array
edge = temp_domain.edge()
histogram, _ = np.histogramdd(data[:, marginal], bins=edge)
marginal_dict[marginal] = histogram
else:
uniques, cnts = | np.unique(data, return_counts=True, axis=0) | numpy.unique |
import numpy as np
import pandas as pd
import pdb
import re
from time import time
import json
import random
import os
import model
import paths
from scipy.spatial.distance import pdist, squareform
from scipy.stats import multivariate_normal, invgamma, mode
from scipy.special import gamma
# from scipy.misc import imresize
from functools import partial
from math import ceil
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.preprocessing import MinMaxScaler
# --- to do with loading --- #
def get_samples_and_labels(settings):
    """
    Parse settings options to load or generate correct type of data,
    perform test/train split as necessary, and reform into 'samples' and 'labels'
    dictionaries.

    Returns (samples, pdf, labels): samples and labels are dicts keyed by
    'train'/'vali'/'test'; pdf is a density function or None (whatever
    get_data returned).  NOTE: ``settings`` is mutated in place at the end.
    """
    if settings['data_load_from']:
        # Pre-generated data: load the cached .npy and unpack the split dicts.
        data_path = './experiments/data/' + settings['data_load_from'] + '.data.npy'
        print('Loading data from', data_path)
        samples, pdf, labels = get_data('load', data_path)
        train, vali, test = samples['train'], samples['vali'], samples['test']
        train_labels, vali_labels, test_labels = labels['train'], labels['vali'], labels['test']
        del samples, labels
    else:
        # generate the data
        data_vars = ['num_samples', 'seq_length', 'num_signals', 'freq_low',
                     'freq_high', 'amplitude_low', 'amplitude_high', 'scale',
                     'full_mnist']
        # Forward only the generation-relevant settings that are present.
        data_settings = dict((k, settings[k]) for k in data_vars if k in settings.keys())
        samples, pdf, labels = get_data(settings['data'], data_settings)
        if 'multivariate_mnist' in settings and settings['multivariate_mnist']:
            # Reinterpret each flat sequence as a (sqrt(L), sqrt(L)) array.
            seq_length = samples.shape[1]
            samples = samples.reshape(-1, int(np.sqrt(seq_length)), int(np.sqrt(seq_length)))
        if 'normalise' in settings and settings['normalise']:  # TODO this is a mess, fix
            print("monish")  # NOTE(review): leftover debug print
            print(settings['normalise'])
            norm = True
        else:
            norm = False
        if labels is None:
            train, vali, test = split(samples, [0.6, 0.2, 0.2], normalise=norm)
            train_labels, vali_labels, test_labels = None, None, None
        else:
            train, vali, test, labels_list = split(samples, [0.6, 0.2, 0.2], normalise=norm, labels=labels)
            train_labels, vali_labels, test_labels = labels_list
    # Repack everything into train/vali/test dictionaries.
    labels = dict()
    labels['train'], labels['vali'], labels['test'] = train_labels, vali_labels, test_labels
    samples = dict()
    samples['train'], samples['vali'], samples['test'] = train, vali, test
    # update the settings dictionary to update erroneous settings
    # (mostly about the sequence length etc. - it gets set by the data!)
    settings['seq_length'] = samples['train'].shape[1]
    settings['num_samples'] = samples['train'].shape[0] + samples['vali'].shape[0] + samples['test'].shape[0]
    settings['num_signals'] = samples['train'].shape[2]
    settings['num_generated_features'] = samples['train'].shape[2]
    return samples, pdf, labels
def get_data(data_type, data_options=None):
    """
    Helper/wrapper function to get the requested data.

    :param data_type: one of 'load', 'sine', 'mnist', 'gp_rbf', 'linear'.
    :param data_options: path (for 'load') or a dict of generator kwargs.
    :return: (samples, pdf, labels); pdf and labels are None when the
        generator does not provide them.
    :raises ValueError: for an unknown ``data_type``.
    """
    labels = None
    pdf = None
    if data_type == 'load':
        # FIX: the .npy file stores a pickled dict, which NumPy >= 1.16.3
        # refuses to load unless allow_pickle=True is passed explicitly.
        data_dict = np.load(data_options, allow_pickle=True).item()
        samples = data_dict['samples']
        pdf = data_dict['pdf']
        labels = data_dict['labels']
    elif data_type == 'sine':
        samples = sine_wave(**data_options)
    elif data_type == 'mnist':
        if data_options['full_mnist']:
            samples, labels = mnist()
        else:
            #samples, labels = load_resized_mnist_0_5(14)
            samples, labels = load_resized_mnist(14)    # this is the 0-2 setting
    elif data_type == 'gp_rbf':
        print(data_options)
        samples, pdf = GP(**data_options, kernel='rbf')
    elif data_type == 'linear':
        samples, pdf = linear(**data_options)
    else:
        raise ValueError(data_type)
    print('Generated/loaded', len(samples), 'samples from data-type', data_type)
    return samples, pdf, labels
def get_batch(samples, batch_size, batch_idx, labels=None):
    """Return the batch_idx-th contiguous batch of samples (and labels).

    With no labels, returns (batch, None). With a 2-tuple of label arrays,
    returns (batch, labels0_batch, labels1_batch); with a single ndarray,
    returns (batch, labels_batch).
    """
    lo = batch_idx * batch_size
    hi = lo + batch_size
    batch = samples[lo:hi]
    if labels is None:
        return batch, None
    if type(labels) == tuple:  # two sets of labels
        assert len(labels) == 2
        return batch, labels[0][lo:hi], labels[1][lo:hi]
    assert type(labels) == np.ndarray
    return batch, labels[lo:hi]
def normalise_data(train, vali, test, low=-1, high=1):
    """Whiten each signal to mean 0 / std 1.

    Statistics are computed from train+vali only, then applied to all three
    splits, so the test set never influences the normalisation.
    (``low``/``high`` are accepted for interface compatibility but unused;
    min-max scaling lives in ``scale_data``.)
    """
    # data is num_samples x seq_length x signals; reduce over samples & time.
    reference = np.vstack([train, vali])
    mean = np.mean(reference, axis=(0, 1))
    std = np.std(reference - mean, axis=(0, 1))
    return (train - mean) / std, (vali - mean) / std, (test - mean) / std
def scale_data(train, vali, test, scale_range=(-1, 1)):
    """Min-max scale all splits into ``scale_range``.

    The scaler is fit on train+vali only; each (seq_length, num_signals)
    sample is flattened for fitting and restored to 3D afterwards.
    """
    signal_length = train.shape[1]
    num_signals = train.shape[2]
    width = signal_length * num_signals
    # Flatten each sample to a single feature row for the scaler.
    train_r, vali_r, test_r = (a.reshape(-1, width) for a in (train, vali, test))
    scaler = MinMaxScaler(feature_range=scale_range).fit(np.vstack([train_r, vali_r]))
    restore = lambda a: a.reshape(-1, signal_length, num_signals)
    return (restore(scaler.transform(train_r)),
            restore(scaler.transform(vali_r)),
            restore(scaler.transform(test_r)))
def split(samples, proportions, normalise=False, scale=False, labels=None, random_seed=None):
    """
    Return train/validation/test split.

    :param samples: array, first axis indexes samples.
    :param proportions: [train, vali, test] fractions summing to 1.
    :param normalise: whiten using train+vali statistics (see normalise_data).
    :param scale: min-max scale instead (mutually exclusive with normalise).
    :param labels: optional ndarray or dict of ndarrays, split identically.
    :param random_seed: optional seed for reproducible shuffling.
    :return: (train, vali, test) or (train, vali, test, labels_split).
    """
    # FIX: identity comparison with None, not !=.
    if random_seed is not None:
        random.seed(random_seed)
        np.random.seed(random_seed)
    # FIX: proportions are floats -- exact equality can fail for inputs like
    # [0.6, 0.2, 0.2] depending on summation order; use a tolerance.
    assert np.isclose(np.sum(proportions), 1.0)
    n_total = samples.shape[0]
    n_train = ceil(n_total*proportions[0])
    n_test = ceil(n_total*proportions[2])
    n_vali = n_total - (n_train + n_test)
    # permutation to shuffle the samples
    shuff = np.random.permutation(n_total)
    train_indices = shuff[:n_train]
    vali_indices = shuff[n_train:(n_train + n_vali)]
    test_indices = shuff[(n_train + n_vali):]
    # TODO when we want to scale we can just return the indices
    assert len(set(train_indices).intersection(vali_indices)) == 0
    assert len(set(train_indices).intersection(test_indices)) == 0
    assert len(set(vali_indices).intersection(test_indices)) == 0
    # split up the samples
    train = samples[train_indices]
    vali = samples[vali_indices]
    test = samples[test_indices]
    # apply the same normalisation scheme to all parts of the split
    if normalise:
        if scale: raise ValueError(normalise, scale)  # mutually exclusive
        train, vali, test = normalise_data(train, vali, test)
    elif scale:
        train, vali, test = scale_data(train, vali, test)
    if labels is None:
        return train, vali, test
    else:
        print('Splitting labels...')
        if type(labels) == np.ndarray:
            train_labels = labels[train_indices]
            vali_labels = labels[vali_indices]
            test_labels = labels[test_indices]
            labels_split = [train_labels, vali_labels, test_labels]
        elif type(labels) == dict:
            # more than one set of labels! (weird case)
            labels_split = dict()
            for (label_name, label_set) in labels.items():
                train_labels = label_set[train_indices]
                vali_labels = label_set[vali_indices]
                test_labels = label_set[test_indices]
                labels_split[label_name] = [train_labels, vali_labels, test_labels]
        else:
            raise ValueError(type(labels))
        return train, vali, test, labels_split
def make_predict_labels(samples, labels):
    """Append labels (remapped from {0,1} to {-1,+1}) as extra constant
    channels on every sample, for each split in the dicts.

    Returns (new_samples, new_labels); every entry of new_labels is None
    because the label is now embedded in the signal itself.
    """
    print('Appending label to samples')
    assert labels is not None
    train_lab = labels['train']
    num_labels = train_lab.shape[1] if len(train_lab.shape) > 1 else 1
    seq_length = samples['train'].shape[1]
    num_signals = samples['train'].shape[2]
    new_samples = dict()
    new_labels = dict()
    for split_name, X in samples.items():
        lab = labels[split_name]
        stacked = np.zeros(shape=(X.shape[0], seq_length, num_signals + num_labels))
        for idx in range(X.shape[0]):
            # Original channels first, then the (broadcast) +/-1 label block.
            stacked[idx, :, :num_signals] = X[idx, :, :]
            stacked[idx, :, num_signals:] = (2 * lab[idx] - 1).reshape(num_labels)
        new_samples[split_name] = stacked
        new_labels[split_name] = None
    return new_samples, new_labels
# --- specific data-types --- #
def mnist(randomize=False):
    """Load the MNIST training set (cached as .npy after first CSV load).

    :param randomize: if True, apply one fixed random pixel permutation to
        every image (not needed for GAN experiments).
    :return: (samples, labels) where samples is (n, 784, 1) scaled to
        [-1, 1] and labels is the (n,) digit column.
    """
    try:
        train = np.load('./experiments/data/mnist_train.npy')
        print('Loaded mnist from .npy')
    except IOError:
        print('Failed to load MNIST data from .npy, loading from csv')
        # read from the csv
        train = np.loadtxt(open('./experiments/data/mnist_train.csv', 'r'), delimiter=',')
        # scale samples from 0 to 1
        train[:, 1:] /= 255
        # scale from -1 to 1
        train[:, 1:] = 2*train[:, 1:] - 1
        # save to the npy
        np.save('./experiments/data/mnist_train.npy', train)
    # the first column is labels, kill them
    labels = train[:, 0]
    samples = train[:, 1:]
    if randomize:
        # not needed for GAN experiments...
        print('Applying fixed permutation to mnist digits.')
        fixed_permutation = np.random.permutation(28*28)
        # FIX: permute the pixel columns of `samples`; the old code indexed
        # `train`, whose column 0 is the label, so the permuted "image"
        # wrongly included the label value and dropped the last pixel.
        samples = samples[:, fixed_permutation]
    samples = samples.reshape(-1, 28*28, 1)
    return samples, labels
def sine_wave(seq_length=30, num_samples=28*5*100, num_signals=1,
              freq_low=1, freq_high=5, amplitude_low=0.1, amplitude_high=0.9, **kwargs):
    """Generate random sinusoids with uniform frequency, amplitude and phase.

    :return: array of shape (num_samples, seq_length, num_signals); each
        signal is A*sin(2*pi*f*t/seq_length + offset) with
        f ~ U[freq_low, freq_high], A ~ U[amplitude_low, amplitude_high],
        offset ~ U[-pi, pi].
    """
    ix = np.arange(seq_length) + 1
    samples = []
    # FIX: the inner loop no longer shadows the outer loop variable, and the
    # uniform() bounds are passed the right way round (low <= high) -- the
    # sampled interval itself is unchanged.
    for _ in range(num_samples):
        signals = []
        for _sig in range(num_signals):
            f = np.random.uniform(low=freq_low, high=freq_high)              # frequency
            A = np.random.uniform(low=amplitude_low, high=amplitude_high)    # amplitude
            # phase offset
            offset = np.random.uniform(low=-np.pi, high=np.pi)
            signals.append(A*np.sin(2*np.pi*f*ix/float(seq_length) + offset))
        samples.append(np.array(signals).T)
    # the shape of the samples is num_samples x seq_length x num_signals
    return np.array(samples)
def periodic_kernel(T, f=1.45/30, gamma=7.0, A=0.1):
    """Gram matrix of the periodic kernel over all pairs of time points.

    k(t, t') = A * exp(-gamma * sin(2*pi*f*|t - t'|)^2); higher f means more
    peaks, smaller gamma makes the covariance smoother.
    Heuristic for non-singular rbf:
    periodic_kernel(np.arange(len), f=1.0/(0.79*len), A=1.0, gamma=len/4.0)
    """
    pair_dists = squareform(pdist(T.reshape(-1, 1)))
    return A * np.exp(-gamma * np.sin(2 * np.pi * pair_dists * f) ** 2)
def GP(seq_length=30, num_samples=28*5*100, num_signals=1, scale=0.1, kernel='rbf', **kwargs):
    """Sample trajectories from a zero-mean Gaussian process.

    :param kernel: 'rbf' (sklearn rbf_kernel with gamma=scale) or 'periodic'.
    :return: (samples, logpdf) where samples has shape
        (num_samples, seq_length, num_signals) and logpdf is the
        multivariate-normal log density of a single trajectory.
    """
    # note: time index runs over integer steps, not [0, 1]
    time_idx = np.arange(seq_length)
    if kernel == 'periodic':
        cov = periodic_kernel(time_idx)
    elif kernel == 'rbf':
        cov = rbf_kernel(time_idx.reshape(-1, 1), gamma=scale)
    else:
        raise NotImplementedError
    # scale the covariance
    cov *= 0.2
    print(np.linalg.det(cov))
    distribution = multivariate_normal(mean=np.zeros(cov.shape[0]), cov=cov)
    pdf = distribution.logpdf
    # draw each signal channel independently from the same process
    samples = np.empty(shape=(num_samples, seq_length, num_signals))
    for signal_idx in range(num_signals):
        samples[:, :, signal_idx] = distribution.rvs(size=num_samples)
    return samples, pdf
def linear_marginal_likelihood(Y, X, a0, b0, mu0, lambda0, log=True, **kwargs):
"""
Marginal likelihood for linear model.
See https://en.wikipedia.org/wiki/Bayesian_linear_regression pretty much
"""
seq_length = Y.shape[1] # note, y is just a line (one channel) TODO
n = seq_length
an = a0 + 0.5*n
XtX = np.dot(X.T, X)
lambdan = XtX + lambda0
prefactor = (2*np.pi)**(-0.5*n)
dets = np.sqrt(np.linalg.det(lambda0)/np.linalg.det(lambdan))
marginals = np.empty(Y.shape[0])
for (i, y) in enumerate(Y):
y_reshaped = y.reshape(seq_length)
betahat = np.dot(np.linalg.inv(XtX), np.dot(X.T, y_reshaped))
mun = np.dot(np.linalg.inv(lambdan), np.dot(XtX, betahat) + np.dot(lambda0, mu0))
bn = b0 + 0.5*( | np.dot(y_reshaped.T, y_reshaped) | numpy.dot |
import numpy as np
from .helper_finder import BinomialModel, np_get_helper_to_predicted_helper_probs, get_helper_to_predicted_helper_probs
from .helper_variants import PriorModel
def get_masked_calc_func(score_func, mask):
    """Wrap ``score_func`` so that masked positions score -inf.

    ``mask`` is a boolean array; True entries are disallowed. The returned
    function slices the additive mask according to ``offset`` (positive:
    drop the tail; non-positive: drop the head) before adding it.
    """
    print("MASKED", np.sum(mask))
    additive_mask = np.where(mask, -np.inf, 0)
    def masked_score_func(count_matrix, offset):
        window = additive_mask[:-offset] if offset > 0 else additive_mask[-offset:]
        return score_func(count_matrix) + window
    return masked_score_func
def get_weighted_calc_func(score_func, weights, k=1):
    """Wrap ``score_func`` so a sliced weight vector (scaled by ``k``) is
    added to its output; slicing by ``offset`` mirrors get_masked_calc_func.
    """
    def weighted_score_func(count_matrix, offset):
        window = weights[:-offset] if offset > 0 else weights[-offset:]
        return score_func(count_matrix) + window * k
    return weighted_score_func
def get_prob_weights(k_r, k_a, genotype_probs):
model = BinomialModel(k_r, k_a)
prior_model = PriorModel(model, np.log((genotype_probs)))
prob_correct = get_prob_correct(prior_model)
return | np.log(prob_correct) | numpy.log |
import itertools
import os
from flask import Flask
import numpy as np
import pandas as pd
import pulp
from sheetfu import SpreadsheetApp
# Flask application object for the scheduler service.
app = Flask(__name__)
# Score-difference threshold (presumably for filtering unbalanced games) --
# TODO confirm usage; not referenced in the visible functions.
THRESH = .25
# Number of rounds each player is scheduled for (passed as n_games).
N_GAMES = 5
# Google Sheets spreadsheet id consumed by sheetfu (value redacted).
ssid = '<KEY>'
def _game_combos(team_combos, n_games):
    """Creates game combinations from team combinations
    Args:
        team_combos (list[tuple]): the team combinations
        n_games (int): number of games to schedule
    Returns:
        list[tuple]: 3-tuples of (team1, team2, game_number)
    """
    # Two teams may only meet if they share no player.
    legal_games = []
    for team_a, team_b in pulp.combination(team_combos, 2):
        if set(team_a).isdisjoint(team_b):
            legal_games.append((team_a, team_b))
    # Replicate every legal pairing for each round number 1..n_games.
    games = []
    for game_number in np.arange(n_games) + 1:
        for team_a, team_b in legal_games:
            games.append((team_a, team_b, game_number))
    return games
def _game_scores(game_combos, s):
"""Creates game scores from mapping
Args:
game_combos (list[tuple]): the game combos
s (dict[str, float]): the game scores
Returns:
dict[tuple, float]
"""
# calculate game score differential
game_scores = {}
for gc in game_combos:
p1, p2 = gc[0]
p3, p4 = gc[1]
game_scores[(gc[0], gc[1])] = np.abs((s[p1] + s[p2]) - (s[p3] + s[p4]))
return game_scores
def _optimize(team_combos, game_combos, game_scores, p, n_games, solver=None):
    """Build and solve the scheduling ILP: choose games minimizing the total
    team-score imbalance subject to fairness constraints.
    Args:
        team_combos (list[tuple]): the team combos
        game_combos (list[tuple]): the game combos
        game_scores (dict[tuple, float]): the game scores
        p (list[str]): player names
        n_games (int): number of games
        solver (pulp.apis.core.LpSolver): optional solver
    Returns:
        tuple[pulp.LpProblem, dict]: the solved problem and the binary
        decision variables keyed by (team1, team2, game_number)
    """
    # decision variables: one binary per candidate game
    gcvars = pulp.LpVariable.dicts('gc_decvar', game_combos, cat=pulp.LpBinary)
    # create problem
    # minimize game scores subject to constraints
    prob = pulp.LpProblem("PBOpt", pulp.LpMinimize)
    # objective function
    # minimize difference between team scores
    prob += pulp.lpSum([gcvars[gc] * game_scores[(gc[0], gc[1])] for gc in game_combos])
    # constraints
    # no selected game may have a score differential > 1
    for gc in game_combos:
        prob += gcvars[gc] * game_scores[(gc[0], gc[1])] <= 1
    # each player must have n_games games
    for player in p:
        prob += pulp.lpSum([v for k, v in gcvars.items()
                            if (player in k[0] or player in k[1])]) == n_games
    # each player has 1 game per game_number
    for player in p:
        for game_number in np.arange(1, n_games + 1):
            prob += pulp.lpSum([v for k, v in gcvars.items()
                                if (player in k[0] or player in k[1]) and
                                k[2] == game_number]) == 1
    # partner with any given player at most twice, and face any given
    # player at most three times (these <= 2 / <= 3 bounds are what the
    # model actually enforces)
    for player, pplayer in itertools.combinations(p, 2):
        prob += pulp.lpSum([v for k, v in gcvars.items()
                            if (player in k[0] and pplayer in k[0]) or
                            (player in k[1] and pplayer in k[1])]) <= 2
        prob += pulp.lpSum([v for k, v in gcvars.items()
                            if (player in k[0] and pplayer in k[1]) or
                            (player in k[1] and pplayer in k[0])]) <= 3
    # solve the problem
    if not solver:
        # default: CBC with a 10-minute time cap and absolute MIP gap of 2
        solver = pulp.getSolver('PULP_CBC_CMD', timeLimit=600, gapAbs=2)
    prob.solve(solver)
    return prob, gcvars
def _solution(gcvars, s):
"""Inspects solution
Args:
gcvars (dict[str, LpVariable]): the decision variables
Returns:
DataFrame
"""
# look at solution
df = pd.DataFrame(data=[k for k, v in gcvars.items() if v.varValue == 1],
columns=['Team1', 'Team2', 'Round#'])
df = df.sort_values('Round#')
df['Team1_score'] = df['Team1'].apply(lambda x: sum(s.get(i) for i in x))
df['Team2_score'] = df['Team2'].apply(lambda x: sum(s.get(i) for i in x))
df = df.assign(Combined_score=lambda x: x['Team1_score'] + x['Team2_score'])
df = df.assign(Score_diff=lambda x: ( | np.abs(x['Team1_score'] - x['Team2_score']) | numpy.abs |
import numpy as np
import numpy.random as npr
import math
import pandas as pd
def WongChanSimCov(n):
Z = npr.normal(size=(n, 10))
X = np.zeros((n, 10))
X[:,0] = | np.exp(Z[:,0]/2.) | numpy.exp |
import os
import copy
import glob
import numpy as np
from gains import Absorber
import corner
from utils import (fit_2d_gmm, vcomplex, nested_ddict, make_ellipses,
baselines_2_ants, find_outliers_2d_mincov,
find_outliers_2d_dbscan, find_outliers_dbscan, fit_kde,
fit_2d_kde, hdi_of_mcmc, hdi_of_sample, bc_endpoint, ants_2_baselines)
import matplotlib
from uv_data import UVData
from from_fits import create_model_from_fits_file
from model import Model
from spydiff import import_difmap_model, modelfit_difmap
from spydiff import modelfit_difmap
# Use the non-interactive Agg backend so figures can be rendered without a
# display (must be selected before pyplot is first used).
matplotlib.use('Agg')
# Global tick-label font size for both axes.
label_size = 12
matplotlib.rcParams['xtick.labelsize'] = label_size
matplotlib.rcParams['ytick.labelsize'] = label_size
def xy_2_rtheta(params):
    """Convert component parameters from (flux, x, y, ...) to (flux, r, theta, ...).

    r is the radial distance; theta is rad2deg(arctan(x/y)), i.e. the angle
    measured from the y-axis in degrees. Any trailing parameters are passed
    through unchanged.
    """
    flux, x, y = params[:3]
    r = np.sqrt(x ** 2 + y ** 2)
    theta = np.rad2deg(np.arctan(x / y))
    result = [flux, r, theta]
    # FIX: slicing a sequence never raises IndexError -- params[3:] is simply
    # empty when there are no extra parameters, so the old try/except-pass
    # was dead code.
    result.extend(params[3:])
    return result
def boot_ci(boot_images, original_image, cred_mass=0.68, kind=None):
    """
    Calculate bootstrap CI.
    :param boot_images:
        Iterable of 2D numpy arrays with bootstrapped images.
    :param original_image:
        2D numpy array with original image.
    :param cred_mass: (optional)
        Credible mass of the interval. (default: ``0.68``)
    :param kind: (optional)
        Type of CI. "asym", "bc" or None. If ``None`` then symmetric one.
        (default: ``None``)
    :return:
        Two numpy arrays with low and high CI borders for each pixel.
    """
    # Stack bootstrap images along a third axis: shape (ny, nx, n_boot).
    images_cube = np.dstack(boot_images)
    # NOTE(review): this local shadows the function's own name -- harmless
    # here, but easy to misread.
    boot_ci = np.zeros(np.shape(images_cube[:, :, 0]))
    mean_boot = np.zeros(np.shape(images_cube[:, :, 0]))
    hdi_0 = np.zeros(np.shape(images_cube[:, :, 0]))
    hdi_1 = np.zeros(np.shape(images_cube[:, :, 0]))
    hdi_low = np.zeros(np.shape(images_cube[:, :, 0]))
    hdi_high = np.zeros(np.shape(images_cube[:, :, 0]))
    alpha = 1 - cred_mass
    print("calculating CI intervals")
    if kind == "bc":
        # Bias-corrected percentile endpoints, computed pixel by pixel.
        for (x, y), value in np.ndenumerate(boot_ci):
            hdi_low[x, y] = bc_endpoint(images_cube[x, y, :], original_image[x, y], alpha/2.)
            hdi_high[x, y] = bc_endpoint(images_cube[x, y, :], original_image[x, y], 1-alpha/2.)
    else:
        # Highest-density interval of the bootstrap sample per pixel.
        for (x, y), value in np.ndenumerate(boot_ci):
            hdi = hdi_of_sample(images_cube[x, y, :], cred_mass=cred_mass)
            boot_ci[x, y] = hdi[1] - hdi[0]
            hdi_0[x, y] = hdi[0]
            hdi_1[x, y] = hdi[1]
            mean_boot[x, y] = np.mean(images_cube[x, y, :])
        if kind == 'asym':
            # Asymmetric CI: center on the original image, offset by the
            # distances of the HDI edges from the bootstrap mean.
            hdi_low = original_image - (mean_boot - hdi_0)
            hdi_high = original_image + hdi_1 - mean_boot
        else:
            # Symmetric CI: half the HDI width on either side of original.
            hdi_low = original_image - boot_ci / 2.
            hdi_high = original_image + boot_ci / 2.
    return hdi_low, hdi_high
def analyze_bootstrap_samples(dfm_model_fname, booted_mdl_paths,
                              dfm_model_dir=None, plot_comps=None,
                              plot_file=None, txt_file=None, cred_mass=0.68,
                              coordinates='xy', out_samples_path=None,
                              limits=None, fig=None):
    """
    Plot bootstrap distribution of model component parameters.
    :param dfm_model_fname:
        File name of original difmap model.
    :param booted_mdl_paths:
        Iterable of paths to bootstrapped difmap models.
    :param dfm_model_dir: (optional)
        Directory with original difmap model. If ``None`` then CWD. (default:
        ``None``)
    :param plot_comps: (optional)
        Iterable of components number to plot on same plot. If ``None`` then
        plot parameter distributions of all components.
    :param plot_file: (optional)
        File to save picture. If ``None`` then don't save picture. (default:
        ``None``)
    :param txt_file: (optional)
        File to save credible intervals for parameters. If ``None`` then don't
        save credible intervals. (default: ``None``)
    :param cred_mass: (optional)
        Value of credible interval mass. Float in range (0., 1.). (default:
        ``0.68``)
    :param coordinates: (optional)
        Type of coordinates to use. ``xy`` or ``rtheta``. (default: ``xy``)
    :param out_samples_path: (optional)
        If given, save the full bootstrap sample matrix there via np.savetxt.
    :param fig: (optional)
        Existing matplotlib figure to draw the corner plot into.
    :return:
        The matplotlib figure used for the corner plot (or the ``fig``
        argument unchanged when no plot was requested).
    """
    n_boot = len(booted_mdl_paths)
    # Get params of initial model used for bootstrap
    comps_orig = import_difmap_model(dfm_model_fname, dfm_model_dir)
    # FIX: the docstring promises "all components" when plot_comps is None,
    # but the old code crashed iterating None.
    if plot_comps is None:
        plot_comps = range(len(comps_orig))
    comps_params0 = {i: [] for i in range(len(comps_orig))}
    for i, comp in enumerate(comps_orig):
        # FIXME: Move (x, y) <-> (r, theta) mapping to ``Component``
        if coordinates == 'xy':
            params = comp.p
        elif coordinates == 'rtheta':
            params = xy_2_rtheta(comp.p)
        else:
            raise Exception
        comps_params0[i].extend(list(params))
    # Load bootstrap models
    comps_params = {i: [] for i in range(len(comps_orig))}
    for booted_mdl_path in booted_mdl_paths:
        path, booted_mdl_file = os.path.split(booted_mdl_path)
        comps = import_difmap_model(booted_mdl_file, path)
        for i, comp in enumerate(comps):
            # FIXME: Move (x, y) <-> (r, theta) mapping to ``Component``
            if coordinates == 'xy':
                params = comp.p
            elif coordinates == 'rtheta':
                params = xy_2_rtheta(comp.p)
            else:
                raise Exception
            comps_params[i].extend(list(params))
    comps_to_plot = [comps_orig[k] for k in plot_comps]
    # (#boot, #parameters)
    # FIX: np.hstack requires a sequence -- modern NumPy rejects generators.
    boot_data = np.hstack([np.array(comps_params[i]).reshape((n_boot,
                           comps_orig[i].size)) for i in plot_comps])
    # Save all bootstrap samples to file optionally
    if out_samples_path:
        boot_data_all = np.hstack([np.array(comps_params[i]).reshape((n_boot,
                                   comps_orig[i].size)) for
                                   i in range(len(comps_orig))])
        np.savetxt(out_samples_path, boot_data_all)
    # Optionally plot
    if plot_file:
        if corner:
            lens = list(np.cumsum([comp.size for comp in comps_orig]))
            lens.insert(0, 0)
            labels = list()
            for comp in comps_to_plot:
                for lab in np.array(comp._parnames)[~comp._fixed]:
                    # FIXME: Move (x, y) <-> (r, theta) mapping to ``Component``
                    if coordinates == 'rtheta':
                        if lab == 'x':
                            lab = 'r'
                        if lab == 'y':
                            lab = 'theta'
                    elif coordinates == 'xy':
                        pass
                    else:
                        raise Exception
                    labels.append(r'' + '$' + lab + '$')
            try:
                n = sum([c.size for c in comps_to_plot])
                if fig is None:
                    fig, axes = matplotlib.pyplot.subplots(nrows=n, ncols=n)
                    fig.set_size_inches(16.5, 16.5)
                corner.corner(boot_data, labels=labels, plot_contours=True,
                              plot_datapoints=False, color='gray',
                              levels=[0.68, 0.95],
                              truths=np.hstack([comps_params0[i] for i in
                                                plot_comps]),
                              title_kwargs={"fontsize": 14},
                              label_kwargs={"fontsize": 14},
                              quantiles=[0.16, 0.5, 0.84], fig=fig,
                              # FIX: matplotlib >= 3.1 removed the 'normed'
                              # hist kwarg; 'density' is the replacement.
                              hist_kwargs={'density': True,
                                           'histtype': 'step',
                                           'stacked': True,
                                           'ls': 'solid'},
                              title_fmt=".4f", max_n_ticks=3)
            except (ValueError, RuntimeError):
                # Leave a marker file so failed plots are visible afterwards.
                with open(plot_file + '_failed_plot', 'w'):
                    print("Failed to plot... ValueError")
        else:
            print("Install ``corner`` for corner-plots")
    if txt_file:
        # Print credible intervals
        fn = open(txt_file, 'w')
        fn.write("# parameter original.value low.boot high.boot mean.boot"
                 " median.boot (mean-low).boot (high-mean).boot\n")
        # Column offset of the current component inside boot_data.
        recorded = 0
        for i in plot_comps:
            comp = comps_orig[i]
            for j in range(comp.size):
                low, high, mean, median = hdi_of_mcmc(boot_data[:, recorded+j],
                                                      cred_mass=cred_mass,
                                                      return_mean_median=True)
                # FIXME: Move (x, y) <-> (r, theta) mapping to ``Component``
                parnames = comp._parnames
                if coordinates == 'xy':
                    params = comp.p
                elif coordinates == 'rtheta':
                    params = xy_2_rtheta(comp.p)
                    parnames[1] = 'r'
                    parnames[2] = 'theta'
                else:
                    raise Exception
                fn.write("{:<4} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f}"
                         " {:.6f}".format(parnames[j], params[j], low,
                                          high, mean, median, abs(median - low),
                                          abs(high - median)))
                fn.write("\n")
            # advance past this component's parameter columns
            recorded += (j + 1)
        fn.close()
    return fig
# TODO: Check that numbering of bootstrapped data and their models is OK
def bootstrap_uvfits_with_difmap_model(uv_fits_path, dfm_model_path,
                                       nonparametric=False, use_kde=True,
                                       use_v=False, n_boot=100, stokes='I',
                                       boot_dir=None, recenter=True,
                                       clean_after=True,
                                       out_txt_file='txt.txt',
                                       out_plot_file='plot.png',
                                       pairs=False, niter=100,
                                       bootstrapped_uv_fits=None,
                                       additional_noise=None,
                                       out_rchisq_file=None):
    """
    Bootstrap a difmap model against UV data: resample the data ``n_boot``
    times, re-fit the model to each realization with difmap, and analyze the
    spread of the fitted component parameters.

    :param uv_fits_path: Path to the UV FITS data file.
    :param dfm_model_path: Path to the difmap model being bootstrapped.
    :param bootstrapped_uv_fits: (optional) Pre-made bootstrapped FITS files;
        when given, the resampling step is skipped entirely.
    :param out_rchisq_file: (optional) File to save the reduced chi-squares
        of the bootstrap fits (via np.savetxt).
    :return: The figure produced by ``analyze_bootstrap_samples``.

    The remaining keyword arguments are forwarded to ``CleanBootstrap.run``
    and ``modelfit_difmap``. Side effects: changes the CWD to ``boot_dir``
    when resampling, and (with ``clean_after``) deletes the intermediate
    bootstrap data and model files.
    """
    dfm_model_dir, dfm_model_fname = os.path.split(dfm_model_path)
    comps = import_difmap_model(dfm_model_fname, dfm_model_dir)
    if boot_dir is None:
        boot_dir = os.getcwd()
    if bootstrapped_uv_fits is None:
        # Resample the UV data around the model prediction.
        uvdata = UVData(uv_fits_path)
        model = Model(stokes=stokes)
        model.add_components(*comps)
        boot = CleanBootstrap([model], uvdata, additional_noise=additional_noise)
        os.chdir(boot_dir)
        boot.run(nonparametric=nonparametric, use_kde=use_kde, recenter=recenter,
                 use_v=use_v, n=n_boot, pairs=pairs)
        bootstrapped_uv_fits = sorted(glob.glob(os.path.join(boot_dir,
                                                             'bootstrapped_data*.fits')))
    # Re-fit the original model to every bootstrapped data set.
    out_rchisq = list()
    for j, bootstrapped_fits in enumerate(bootstrapped_uv_fits):
        rchisq = modelfit_difmap(bootstrapped_fits, dfm_model_fname,
                                 'mdl_booted_{}.mdl'.format(j),
                                 path=boot_dir, mdl_path=dfm_model_dir,
                                 out_path=boot_dir, niter=niter,
                                 show_difmap_output=True)
        out_rchisq.append(rchisq)
        print("Finished modelfit of {}th bootstrapped data with with"
              " RChiSq = {}".format(j, rchisq))
    if out_rchisq_file is not None:
        np.savetxt(out_rchisq_file, np.array(out_rchisq))
    booted_mdl_paths = glob.glob(os.path.join(boot_dir, 'mdl_booted*'))
    # Summarize the spread of the bootstrapped model parameters.
    fig = analyze_bootstrap_samples(dfm_model_fname, booted_mdl_paths, dfm_model_dir,
                                    plot_comps=range(len(comps)),
                                    plot_file=out_plot_file, txt_file=out_txt_file)
    # Clean
    if clean_after:
        for file_ in bootstrapped_uv_fits:
            os.unlink(file_)
        for file_ in booted_mdl_paths:
            os.unlink(file_)
    return fig
def create_random_D_dict(uvdata, sigma_D):
    """
    Create dictionary with random D-terms for each antenna/IF/polarization.
    :param uvdata:
        Instance of ``UVData`` to generate D-terms.
    :param sigma_D:
        D-terms residual noise or mapping from antenna names to residual D-term std.
    :return:
        Dictionary with keys [antenna name][integer of IF]["R"/"L"]
    """
    # FIX: collections.Mapping was removed in Python 3.10 -- the ABC lives
    # in collections.abc.
    from collections.abc import Mapping
    d_dict = dict()
    for ant in list(uvdata.antenna_mapping.values()):
        d_dict[ant] = dict()
        for band in range(uvdata.nif):
            d_dict[ant][band] = dict()
            for pol in ("R", "L"):
                # Real and imaginary parts drawn independently around (0, 0);
                # the std is per-antenna when a mapping was supplied.
                scale = sigma_D[ant] if isinstance(sigma_D, Mapping) else sigma_D
                rands = np.random.normal(loc=0, scale=scale, size=2)
                d_dict[ant][band][pol] = rands[0]+1j*rands[1]
    return d_dict
# TODO: Workaround if no antenna/pol/IF informtation is available from dict
def create_const_amp_D_dict(uvdata, amp_D, per_antenna=True):
    """
    Create dictionary with constant-amplitude, random-phase D-terms for each
    antenna/IF/polarization.

    :param uvdata:
        Instance of ``UVData`` to generate D-terms.
    :param amp_D:
        D-terms amplitude. Float or mappable with keys [antenna] or
        [antenna][pol][IF] (depending on ``per_antenna``) and values - residual
        D-term amplitude.
    :param per_antenna: (optional)
        Boolean. Is ``amp_D`` a mapping from antenna to Ds or full (IF/pol)?
        (default: ``True``)
    :return:
        Dictionary with keys [antenna name][integer of IF]["R"/"L"] and values -
        D-terms.
    """
    # ``collections.Mapping`` was removed in Python 3.10 - the ABC lives in
    # ``collections.abc``.
    from collections.abc import Mapping
    d_dict = dict()
    for ant in list(uvdata.antenna_mapping.values()):
        d_dict[ant] = dict()
        for band in range(uvdata.nif):
            d_dict[ant][band] = dict()
            for pol in ("R", "L"):
                # Random phase on the unit circle, amplitude fixed by amp_D
                phase = np.random.uniform(-np.pi, np.pi, size=1)[0]
                if isinstance(amp_D, Mapping):
                    if per_antenna:
                        amp = amp_D[ant]
                    else:
                        amp = amp_D[ant][pol][band]
                else:
                    amp = amp_D
                d_dict[ant][band][pol] = amp*(np.cos(phase)+1j*np.sin(phase))
    return d_dict
def create_const_D_dict(uvdata, amp_D, phase_D):
    """
    Build a dictionary of identical (constant) D-terms for every
    antenna/IF/polarization appearing in the data's baselines.

    :param uvdata:
        Instance of ``UVData`` whose baselines define the antennas.
    :param amp_D:
        D-terms amplitude.
    :param phase_D:
        D-terms phase [rad].
    :return:
        Dictionary with keys [antenna name][integer of IF]["R"/"L"]
    """
    # Single constant complex D-term shared by every entry
    d_value = amp_D*(np.cos(phase_D)+1j*np.sin(phase_D))
    d_dict = dict()
    for baseline in uvdata.baselines:
        print(baseline)
        # Each baseline contributes its two antennas; entries for an antenna
        # are (re)built whenever it appears, exactly as before.
        for ant in baselines_2_ants([baseline]):
            name = uvdata.antenna_mapping[ant]
            d_dict[name] = dict()
            for band in range(uvdata.nif):
                d_dict[name][band] = dict()
                for pol in ("R", "L"):
                    d_dict[name][band][pol] = d_value
    return d_dict
# TODO: Add 0.632-estimate of extra-sample error.
class Bootstrap(object):
"""
Basic class for bootstrapping data using specified model.
    :param models:
        Iterable of ``Model`` subclass instances that represent the model used
        for bootstrapping. There should be only one (or zero) model for each
        Stokes parameter. If there are two, say I-stokes models, then sum them
        first using ``Model.__add__``.
:param uvdata:
Instance of ``UVData`` class.
"""
    def __init__(self, models, uvdata):
        """
        :param models:
            Iterable of ``Model`` subclass instances (at most one per Stokes).
        :param uvdata:
            Instance of ``UVData`` class with the observed visibilities.
        """
        self.models = models
        self.model_stokes = [model.stokes for model in models]
        self.data = uvdata
        # Model visibilities: deep copy of the data with the models substituted
        self.model_data = copy.deepcopy(uvdata)
        self.model_data.substitute(models)
        # Residuals are defined by subclasses via ``get_residuals``
        self.residuals = self.get_residuals()
        self.noise_residuals = None
        # Dictionary with keys - baseline, #IF, #Stokes and values - instances
        # of ``sklearn.neighbors.KernelDensity`` class fitted on the residuals
        # (Re&Im) of key baselines
        self._residuals_fits = nested_ddict()
        # Same as above, but fitted jointly on the 2D (Re, Im) residuals
        self._residuals_fits_2d = nested_ddict()
        # Dictionary with keys - baseline, #scan, #IF, #Stokes and values -
        # instances of ``sklearn.neighbors.KernelDensity`` class fitted on the
        # residuals (Re&Im)
        self._residuals_fits_scans = nested_ddict()
        # Dictionary with keys - baselines & values - tuples with centers of
        # real & imag residuals for that baseline
        self._residuals_centers = nested_ddict()
        self._residuals_centers_scans = nested_ddict()
        # Dictionary with keys - baseline, #IF, #Stokes and value - boolean
        # numpy array with outliers
        self._residuals_outliers = nested_ddict()
        # Dictionary with keys - baseline, #scan, #IF, #Stokes and value -
        # boolean numpy array with outliers
        self._residuals_outliers_scans = nested_ddict()
    def get_residuals(self):
        """
        Implements different residuals calculation.

        Subclasses must override this to define what "residuals between model
        and data" means for their bootstrap flavour.

        :return:
            Residuals between model and data.
        :raises NotImplementedError:
            Always, in this base class.
        """
        raise NotImplementedError
def plot_residuals_trio(self, outname, split_scans=True, freq_average=False,
IF=None, stokes=['RR']):
if IF is None:
IF = range(self.residuals.nif)
if stokes is None:
stokes = range(self.residuals.nstokes)
else:
stokes_list = list()
for stoke in stokes:
print("Parsing {}".format(stoke))
print(self.residuals.stokes)
stokes_list.append(self.residuals.stokes.index(stoke))
stokes = stokes_list
print("Plotting IFs {}".format(IF))
print("Plotting Stokes {}".format(stokes))
for baseline in self.residuals.baselines:
print(baseline)
ant1, ant2 = baselines_2_ants([baseline])
if split_scans:
try:
for i, indxs in enumerate(self.residuals._indxs_baselines_scans[baseline]):
# Complex (#, #IF, #stokes)
data = self.residuals.uvdata[indxs]
# weights = self.residuals.weights[indxs]
if freq_average:
raise NotImplementedError
# # FIXME: Aberage w/o outliers
# # Complex (#, #stokes)
# data = np.mean(data, axis=1)
# for stoke in stokes:
# # Complex 1D array to plot
# data_ = data[:, stoke]
# fig, axes = matplotlib.pyplot.subplots(nrows=2,
# ncols=2)
# matplotlib.pyplot.rcParams.update({'axes.titlesize':
# 'small'})
# axes[1, 0].plot(data_.real, data_.imag, '.k')
# axes[1, 0].axvline(0.0, lw=0.2, color='g')
# axes[1, 0].axhline(0.0, lw=0.2, color='g')
# axes[0, 0].hist(data_.real, bins=10,
# label="Re {}-{}".format(ant1, ant2),
# color="#4682b4")
# legend = axes[0, 0].legend(fontsize='small')
# axes[0, 0].axvline(0.0, lw=1, color='g')
# axes[1, 1].hist(data_.imag, bins=10, color="#4682b4",
# orientation='horizontal',
# label="Im {}-{}".format(ant1, ant2))
# legend = axes[1, 1].legend(fontsize='small')
# axes[1, 1].axhline(0.0, lw=1, color='g')
# fig.savefig("res_2d_bl{}_st{}_scan_{}".format(baseline, stoke, i),
# bbox_inches='tight', dpi=400)
# matplotlib.pyplot.close()
else:
for IF_ in IF:
for stoke in stokes:
# Complex 1D array to plot
data_ = data[:, IF_, stoke]
# weigths_ = weights[:, IF_, stoke]
# data_pw = data_[weigths_ > 0]
data_pw = data_[self.residuals._pw_indxs[indxs, IF_, stokes]]
data_nw = data_[self.residuals._nw_indxs[indxs, IF_, stokes]]
data_out = data_pw[self._residuals_outliers_scans[baseline][i][IF_][stoke]]
# data_nw = data_[weigths_ <= 0]
fig, axes = matplotlib.pyplot.subplots(nrows=2,
ncols=2)
matplotlib.pyplot.rcParams.update({'axes.titlesize':
'small'})
axes[1, 0].plot(data_.real, data_.imag, '.k')
axes[1, 0].plot(data_nw.real, data_nw.imag, '.', color='orange')
axes[1, 0].plot(data_out.real, data_out.imag, '.r')
try:
x_c, y_c = self._residuals_centers_scans[baseline][i][IF_][stoke]
axes[1, 0].plot(x_c, y_c, '.y')
except ValueError:
x_c, y_c = 0., 0.
axes[1, 0].axvline(0.0, lw=0.2, color='g')
axes[1, 0].axhline(0.0, lw=0.2, color='g')
axes[0, 0].hist(data_.real, bins=10,
label="Re "
"{}-{}".format(ant1,
ant2),
color="#4682b4",
histtype='stepfilled',
alpha=0.3,
normed=True)
try:
clf_re = self._residuals_fits_scans[baseline][i][IF_][stoke][0]
sample = np.linspace(np.min(data_.real) - x_c,
np.max(data_.real) - x_c,
1000)
pdf = np.exp(clf_re.score_samples(sample[:, np.newaxis]))
axes[0, 0].plot(sample + x_c, pdf, color='blue',
alpha=0.5, lw=2, label='kde')
# ``AttributeError`` when no ``clf`` for that
# baseline, IF, Stokes
except (ValueError, AttributeError):
pass
legend = axes[0, 0].legend(fontsize='small')
axes[0, 0].axvline(0.0, lw=1, color='g')
axes[1, 1].hist(data_.imag, bins=10,
color="#4682b4",
orientation='horizontal',
histtype='stepfilled',
alpha=0.3, normed=True,
label="Im "
"{}-{}".format(ant1,
ant2))
try:
clf_im = self._residuals_fits_scans[baseline][i][IF_][stoke][1]
sample = np.linspace(np.min(data_.imag) + y_c,
np.max(data_.imag) + y_c,
1000)
pdf = np.exp(clf_im.score_samples(sample[:, np.newaxis]))
axes[1, 1].plot(pdf, sample - y_c, color='blue',
alpha=0.5, lw=2, label='kde')
# ``AttributeError`` when no ``clf`` for that
# baseline, IF, Stokes
except (ValueError, AttributeError):
pass
legend = axes[1, 1].legend(fontsize='small')
axes[1, 1].axhline(0.0, lw=1, color='g')
fig.savefig("{}_ant1_{}_ant2_{}_stokes_{}_IF_{}_scan_{}.png".format(outname,
ant1, ant2, self.residuals.stokes[stoke],
IF_, i), bbox_inches='tight', dpi=400)
matplotlib.pyplot.close()
# If ``self.residuals._indxs_baselines_scans[baseline] = None``
except TypeError:
continue
else:
indxs = self.residuals._indxs_baselines[baseline]
# Complex (#, #IF, #stokes)
data = self.residuals.uvdata[indxs]
# weights = self.residuals.weights[indxs]
if freq_average:
raise NotImplementedError
else:
for IF_ in IF:
for stoke in stokes:
print("Stokes {}".format(stoke))
# Complex 1D array to plot
data_ = data[:, IF_, stoke]
# weigths_ = weights[:, IF_, stoke]
# data_pw = data_[weigths_ > 0]
data_pw = data_[self.residuals._pw_indxs[indxs, IF_, stoke]]
data_nw = data_[self.residuals._nw_indxs[indxs, IF_, stoke]]
data_out = data_pw[self._residuals_outliers[baseline][IF_][stoke]]
# data_nw = data_[weigths_ <= 0]
fig, axes = matplotlib.pyplot.subplots(nrows=2,
ncols=2)
matplotlib.pyplot.rcParams.update({'axes.titlesize':
'small'})
axes[1, 0].plot(data_.real, data_.imag, '.k')
axes[1, 0].plot(data_out.real, data_out.imag, '.r')
axes[1, 0].plot(data_nw.real, data_nw.imag, '.', color='orange')
try:
x_c, y_c = self._residuals_centers[baseline][IF_][stoke]
axes[1, 0].plot(x_c, y_c, '.y')
except ValueError:
x_c, y_c = 0., 0.
axes[1, 0].axvline(0.0, lw=0.2, color='g')
axes[1, 0].axhline(0.0, lw=0.2, color='g')
axes[0, 0].hist(data_.real, bins=20,
label="Re {}-{}".format(ant1, ant2),
color="#4682b4",
histtype='stepfilled', alpha=0.3,
normed=True)
try:
clf_re = self._residuals_fits[baseline][IF_][stoke][0]
sample = np.linspace(np.min(data_.real) - x_c,
np.max(data_.real) - x_c,
1000)
pdf = np.exp(clf_re.score_samples(sample[:, np.newaxis]))
axes[0, 0].plot(sample + x_c, pdf, color='blue',
alpha=0.5, lw=2, label='kde')
# ``AttributeError`` when no ``clf`` for that
# baseline, IF, Stokes
except (ValueError, AttributeError):
pass
legend = axes[0, 0].legend(fontsize='small')
axes[0, 0].axvline(0.0, lw=1, color='g')
axes[1, 1].hist(data_.imag, bins=20,
color="#4682b4",
orientation='horizontal',
histtype='stepfilled', alpha=0.3,
normed=True,
label="Im {}-{}".format(ant1, ant2))
try:
clf_im = self._residuals_fits[baseline][IF_][stoke][1]
sample = np.linspace(np.min(data_.imag) + y_c,
np.max(data_.imag) + y_c,
1000)
pdf = np.exp(clf_im.score_samples(sample[:, np.newaxis]))
axes[1, 1].plot(pdf, sample - y_c, color='blue',
alpha=0.5, lw=2, label='kde')
# ``AttributeError`` when no ``clf`` for that
# baseline, IF, Stokes
except (ValueError, AttributeError):
pass
legend = axes[1, 1].legend(fontsize='small')
axes[1, 1].axhline(0.0, lw=1, color='g')
fig.savefig("{}_ant1_{}_ant2_{}_stokes_{}_IF_{}.png".format(outname,
ant1, ant2, self.residuals.stokes[stoke], IF_),
bbox_inches='tight', dpi=400)
matplotlib.pyplot.close()
    def find_outliers_in_residuals(self, split_scans=False):
        """
        Search for outlier visibilities in the residuals.

        A point is flagged if either of the 1D DBSCAN searches (on the real
        or imaginary part) or the joint 2D DBSCAN search marks it. Results
        are cached as boolean masks in
        ``self._residuals_outliers[baseline][IF][Stokes]`` or, when
        ``split_scans=True``, in
        ``self._residuals_outliers_scans[baseline][scan][IF][Stokes]``.

        :param split_scans:
            Boolean. Find outliers on each scan separately?
        """
        print("Searching for outliers in residuals...")
        for baseline in self.residuals.baselines:
            indxs = self.residuals._indxs_baselines[baseline]
            baseline_data = self.residuals.uvdata[indxs]
            # If searching outliers in baseline data
            if not split_scans:
                for if_ in range(baseline_data.shape[1]):
                    for stokes in range(baseline_data.shape[2]):
                        # Complex array with visibilities for given baseline,
                        # #IF, Stokes
                        data = baseline_data[:, if_, stokes]
                        # Use only valid data with positive weight
                        data_pw = data[self.residuals._pw_indxs[indxs, if_, stokes]]
                        data_nw = data[self.residuals._nw_indxs[indxs, if_, stokes]]
                        print("NW {}".format(np.count_nonzero(data_nw)))
                        # If data are zeros
                        if not np.any(data_pw):
                            continue
                        print("Baseline {}, IF {}, Stokes {}".format(baseline,
                                                                     if_,
                                                                     stokes))
                        # 1D searches on Re and Im separately ...
                        outliers_re = find_outliers_dbscan(data_pw.real, 1., 5)
                        outliers_im = find_outliers_dbscan(data_pw.imag, 1., 5)
                        outliers_1d = np.logical_or(outliers_re, outliers_im)
                        # ... combined with a joint 2D search in (Re, Im)
                        outliers_2d = find_outliers_2d_dbscan(data_pw, 1.5, 5)
                        self._residuals_outliers[baseline][if_][stokes] =\
                            np.logical_or(outliers_1d, outliers_2d)
            # If searching outliers on each scan
            else:
                # Searching each scan on current baseline
                # FIXME: Use zero centers for shitty scans?
                if self.residuals.scans_bl[baseline] is None:
                    continue
                for i, scan_indxs in enumerate(self.residuals.scans_bl[baseline]):
                    scan_uvdata = self.residuals.uvdata[scan_indxs]
                    for if_ in range(scan_uvdata.shape[1]):
                        for stokes in range(scan_uvdata.shape[2]):
                            # Complex array with visibilities for given
                            # baseline, #scan, #IF, Stokes
                            data = scan_uvdata[:, if_, stokes]
                            # Use only valid data with positive weight
                            data_pw = data[self.residuals._pw_indxs[scan_indxs, if_, stokes]]
                            data_nw = data[self.residuals._nw_indxs[scan_indxs, if_, stokes]]
                            print("NW {}".format(np.count_nonzero(data_nw)))
                            # If data are zeros
                            if not np.any(data_pw):
                                continue
                            print("Baseline {}, scan {}, IF {}," \
                                  " Stokes {}".format(baseline, i, if_, stokes))
                            outliers_re = find_outliers_dbscan(data_pw.real, 1., 5)
                            outliers_im = find_outliers_dbscan(data_pw.imag, 1., 5)
                            outliers_1d = np.logical_or(outliers_re, outliers_im)
                            outliers_2d = find_outliers_2d_dbscan(data_pw, 1.5, 5)
                            self._residuals_outliers_scans[baseline][i][if_][stokes] = \
                                np.logical_or(outliers_1d, outliers_2d)
# TODO: Use only data without outliers
def find_residuals_centers(self, split_scans):
"""
Calculate centers of residuals for each baseline[/scan]/IF/stokes.
"""
print("Finding centers")
for baseline in self.residuals.baselines:
# Find centers for baselines only
if not split_scans:
indxs = self.residuals._indxs_baselines[baseline]
baseline_data = self.residuals.uvdata[indxs]
for if_ in range(baseline_data.shape[1]):
for stokes in range(baseline_data.shape[2]):
data = baseline_data[:, if_, stokes]
# weigths = self.residuals.weights[indxs, if_, stokes]
# Use only valid data with positive weight
# data_pw = data[weigths > 0]
data_pw = data[self.residuals._pw_indxs[indxs, if_, stokes]]
# data_nw = data[self.residuals._nw_indxs[indxs, if_, stokes]]
# If data are zeros
if not np.any(data_pw):
continue
print("Baseline {}, IF {}, Stokes {}".format(baseline, if_,
stokes))
outliers = self._residuals_outliers[baseline][if_][stokes]
x_c = np.sum(data_pw.real[~outliers]) / np.count_nonzero(~outliers)
y_c = np.sum(data_pw.imag[~outliers]) / np.count_nonzero(~outliers)
print("Center: ({:.4f}, {:.4f})".format(x_c, y_c))
self._residuals_centers[baseline][if_][stokes] = (x_c, y_c)
# Find residuals centers on each scan
else:
# Searching each scan on current baseline
# FIXME: Use zero centers for shitty scans?
if self.residuals.scans_bl[baseline] is None:
continue
for i, scan_indxs in enumerate(self.residuals.scans_bl[baseline]):
scan_uvdata = self.residuals.uvdata[scan_indxs]
for if_ in range(scan_uvdata.shape[1]):
for stokes in range(scan_uvdata.shape[2]):
data = scan_uvdata[:, if_, stokes]
# weigths = self.residuals.weights[scan_indxs, if_,
# stokes]
# Use only valid data with positive weight
# data_pw = data[weigths > 0]
data_pw = data[self.residuals._pw_indxs[scan_indxs, if_, stokes]]
# If data are zeros
if not | np.any(data_pw) | numpy.any |
import numpy as np
from agents.common import PLAYER1, PLAYER2, NO_PLAYER, initialize_game_state, is_valid_action
def test_valid_action_allValid():
    """On a fresh (empty) board every one of the 7 columns accepts a move."""
    game = initialize_game_state()
    # Iterate all board columns instead of a hand-written set literal,
    # and use a plain truthiness assert instead of ``== True``
    for column in range(7):
        assert is_valid_action(game, column)
def test_valid_action_oneValid_Column6():
game = initialize_game_state()
game[-1] = | np.array([PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, PLAYER1, NO_PLAYER]) | numpy.array |
"""
Implement the Numpy backend, and collect timing information with different parameters
<NAME>
August 26th, 2021
I have set myself beyond the pale. I am nothing. I am hardly human anymore.
"""
import numpy as np
import pickle
import time
import sys
"""
########################################################################################################################
NETWORK STEP
Update all of the neural states for 1 timestep
"""
def stepAll(inputConnectivity, inputVals, Ulast, timeFactorMembrane, Gm, Ib, thetaLast, timeFactorThreshold, theta0, m, refCtr,
            refPeriod, GmaxNon, GmaxSpk, Gspike, timeFactorSynapse, DelE, outputVoltageConnectivity,
            outputSpikeConnectivity, R=20):
    """
    Advance the full network (all components present) by one timestep.

    :param inputConnectivity: Matrix routing input currents to neurons
    :param inputVals: Input current values (nA)
    :param Ulast: Membrane voltages at the previous timestep (mV)
    :param timeFactorMembrane: Per-neuron constant dt/Cm
    :param Gm: Membrane conductances (uS)
    :param Ib: Bias currents (nA)
    :param thetaLast: Firing thresholds at the previous timestep (mV)
    :param timeFactorThreshold: Per-neuron constant dt/tauTheta
    :param theta0: Initial firing thresholds (mV)
    :param m: Threshold adaptation ratios
    :param refCtr: Remaining refractory timesteps per neuron
    :param refPeriod: Refractory period lengths per neuron
    :param GmaxNon: Maximum nonspiking synaptic conductances (uS)
    :param GmaxSpk: Maximum spiking synaptic conductances (uS)
    :param Gspike: Current spiking synaptic conductances (uS)
    :param timeFactorSynapse: Per-synapse constant dt/tau_syn
    :param DelE: Synaptic reversal potentials
    :param outputVoltageConnectivity: Matrix routing voltages to output nodes
    :param outputSpikeConnectivity: Matrix routing spikes to output nodes
    :param R: Neural activity range (mV)
    :return: (U, Ulast, thetaLast, Gspike, refCtr, outputVoltages,
        outputSpikes, elapsed) where ``elapsed`` is the wall-clock step time
    """
    tic = time.time()
    # Route external current sources to their destination neurons
    appliedCurrent = np.matmul(inputConnectivity, inputVals)
    # Nonspiking conductances are clipped to the range [0, Gmax]
    nonspikingCond = np.maximum(0, np.minimum(GmaxNon * Ulast/R, GmaxNon))
    # Spiking conductances decay toward zero each step
    Gspike = Gspike * (1 - timeFactorSynapse)
    totalCond = nonspikingCond + Gspike
    # Synaptic current: driving-force term minus conductance-load term
    synCurrent = np.sum(totalCond * DelE, axis=1) - Ulast * np.sum(totalCond, axis=1)
    # Membrane potential update (forward Euler)
    U = Ulast + timeFactorMembrane * (-Gm * Ulast + Ib + synCurrent + appliedCurrent)
    # Firing threshold adaptation
    theta = thetaLast + timeFactorThreshold * (-thetaLast + theta0 + m * Ulast)
    # -1 where a neuron fired this step, 0 elsewhere (refCtr blocks firing)
    spikes = np.sign(np.minimum(0, theta + U * (-1 + refCtr)))
    # Connections from spiking neurons jump to their maximum conductance
    Gspike = np.maximum(Gspike, (-spikes) * GmaxSpk)
    # Reset membrane voltages of neurons which spiked
    U = U * (spikes + 1)
    # Count down refractory periods; restart them where spikes occurred
    refCtr = np.maximum(0, refCtr - spikes * (refPeriod + 1) - 1)
    outputVoltages = np.matmul(outputVoltageConnectivity, U)
    outputSpikes = np.matmul(outputSpikeConnectivity, spikes)
    # Roll the state forward for the next step
    Ulast = np.copy(U)
    thetaLast = np.copy(theta)
    toc = time.time()
    return U, Ulast, thetaLast, Gspike, refCtr, outputVoltages, outputSpikes, toc-tic
def stepNoRef(inputConnectivity, inputVals, Ulast, timeFactorMembrane, Gm, Ib, thetaLast, timeFactorThreshold, theta0, m, GmaxNon,
              GmaxSpk, Gspike, timeFactorSynapse, DelE, outputVoltageConnectivity, outputSpikeConnectivity, R=20):
    """
    Advance the network by one timestep when there is no refractory period.

    :param inputConnectivity: Matrix routing input currents to neurons
    :param inputVals: Input current values (nA)
    :param Ulast: Membrane voltages at the previous timestep (mV)
    :param timeFactorMembrane: Per-neuron constant dt/Cm
    :param Gm: Membrane conductances (uS)
    :param Ib: Bias currents (nA)
    :param thetaLast: Firing thresholds at the previous timestep (mV)
    :param timeFactorThreshold: Per-neuron constant dt/tauTheta
    :param theta0: Initial firing thresholds (mV)
    :param m: Threshold adaptation ratios
    :param GmaxNon: Maximum nonspiking synaptic conductances (uS)
    :param GmaxSpk: Maximum spiking synaptic conductances (uS)
    :param Gspike: Current spiking synaptic conductances (uS)
    :param timeFactorSynapse: Per-synapse constant dt/tau_syn
    :param DelE: Synaptic reversal potentials
    :param outputVoltageConnectivity: Matrix routing voltages to output nodes
    :param outputSpikeConnectivity: Matrix routing spikes to output nodes
    :param R: Neural activity range (mV)
    :return: (U, Ulast, thetaLast, Gspike, outputVoltages, outputSpikes,
        elapsed) where ``elapsed`` is the wall-clock step time
    """
    tic = time.time()
    # Route external current sources to their destination neurons
    appliedCurrent = np.matmul(inputConnectivity, inputVals)
    # Nonspiking conductances are clipped to the range [0, Gmax]
    nonspikingCond = np.maximum(0, np.minimum(GmaxNon * Ulast/R, GmaxNon))
    # Spiking conductances decay toward zero each step
    Gspike = Gspike * (1 - timeFactorSynapse)
    totalCond = nonspikingCond + Gspike
    # Synaptic current: driving-force term minus conductance-load term
    synCurrent = np.sum(totalCond * DelE, axis=1) - Ulast * np.sum(totalCond, axis=1)
    # Membrane potential update (forward Euler)
    U = Ulast + timeFactorMembrane * (-Gm * Ulast + Ib + synCurrent + appliedCurrent)
    # Firing threshold adaptation
    theta = thetaLast + timeFactorThreshold * (-thetaLast + theta0 + m * Ulast)
    # -1 where a neuron fired this step (no refractory gating), 0 elsewhere
    spikes = np.sign(np.minimum(0, theta - U))
    # Connections from spiking neurons jump to their maximum conductance
    Gspike = np.maximum(Gspike, (-spikes) * GmaxSpk)
    # Reset membrane voltages of neurons which spiked
    U = U * (spikes + 1)
    outputVoltages = np.matmul(outputVoltageConnectivity, U)
    outputSpikes = np.matmul(outputSpikeConnectivity, spikes)
    # Roll the state forward for the next step
    Ulast = np.copy(U)
    thetaLast = np.copy(theta)
    toc = time.time()
    return U, Ulast, thetaLast, Gspike, outputVoltages, outputSpikes, toc - tic
def stepNoSpike(inputConnectivity, inputVals, Ulast, timeFactorMembrane, Gm, Ib, GmaxNon, DelE, outputConnectivity, R=20):
    """
    Advance a purely nonspiking network by one timestep.

    :param inputConnectivity: Matrix routing input currents to neurons
    :param inputVals: Input current values (nA)
    :param Ulast: Membrane voltages at the previous timestep (mV)
    :param timeFactorMembrane: Per-neuron constant dt/Cm
    :param Gm: Membrane conductances (uS)
    :param Ib: Bias currents (nA)
    :param GmaxNon: Maximum nonspiking synaptic conductances (uS)
    :param DelE: Synaptic reversal potentials
    :param outputConnectivity: Matrix routing voltages to output nodes
    :param R: Neural activity range (mV)
    :return: (U, Ulast, outputNodes, elapsed) where ``elapsed`` is the
        wall-clock step time
    """
    tic = time.time()
    # Route external current sources to their destination neurons
    appliedCurrent = np.matmul(inputConnectivity, inputVals)
    # Nonspiking conductances are clipped to the range [0, Gmax]
    synCond = np.maximum(0, np.minimum(GmaxNon * Ulast/R, GmaxNon))
    # Synaptic current: driving-force term minus conductance-load term
    synCurrent = np.sum(synCond * DelE, axis=1) - Ulast * np.sum(synCond, axis=1)
    # Membrane potential update (forward Euler)
    U = Ulast + timeFactorMembrane * (-Gm * Ulast + Ib + synCurrent + appliedCurrent)
    outputNodes = np.matmul(outputConnectivity, U)
    # Roll the state forward for the next step
    Ulast = np.copy(U)
    toc = time.time()
    return U, Ulast, outputNodes, toc-tic
"""
########################################################################################################################
NETWORK CONSTRUCTION
Construct testing networks using specifications
"""
def constructAll(dt, numNeurons, probConn, perIn, perOut, perSpike, seed=0):
    """
    Build every parameter array needed to run a network in which all
    elements (spiking, thresholds, refractory periods) are present.

    :param dt: Simulation timestep (ms)
    :param numNeurons: Number of neurons in the network
    :param probConn: Probability that any given synapse exists
    :param perIn: Fraction of neurons that receive input
    :param perOut: Fraction of neurons routed to output nodes
    :param perSpike: Fraction of neurons which are spiking
    :param seed: Random seed
    :return: Tuple of all the parameters required to run a network
    """
    # --- Inputs (always at least one input node) ---
    numInputs = int(perIn * numNeurons) or 1
    inputVals = np.zeros(numInputs) + 1.0
    inputConnectivity = np.zeros([numNeurons, numInputs]) + 1
    # --- Neurons ---
    Ulast = np.zeros(numNeurons)
    numSpike = int(perSpike * numNeurons)
    Cm = np.zeros(numNeurons) + 5.0   # membrane capacitance (nF)
    Gm = np.zeros(numNeurons) + 1.0   # membrane conductance (uS)
    Ib = np.zeros(numNeurons) + 10.0  # bias current (nA)
    timeFactorMembrane = dt / Cm
    # --- Thresholds: first numSpike neurons can fire, the rest never do ---
    theta0 = np.where(np.arange(numNeurons) < numSpike, 1.0, sys.float_info.max)
    thetaLast = np.copy(theta0)
    m = np.zeros(numNeurons)
    tauTheta = np.zeros(numNeurons) + 1.0
    timeFactorThreshold = dt / tauTheta
    # --- Refractory state ---
    refCtr = np.zeros(numNeurons)
    refPeriod = np.zeros(numNeurons) + 1
    # --- Synapses (random connectivity, drawn cell by cell) ---
    GmaxNon = np.zeros([numNeurons, numNeurons])
    GmaxSpk = np.zeros([numNeurons, numNeurons])
    Gspike = np.zeros([numNeurons, numNeurons])
    DelE = np.zeros([numNeurons, numNeurons])
    tauSyn = np.zeros([numNeurons, numNeurons]) + 1
    np.random.seed(seed)
    for i in range(numNeurons):
        for j in range(numNeurons):
            if np.random.uniform() < probConn:
                DelE[i][j] = 100
                # Synapse [i][j]: spiking source neurons get a spiking
                # synapse, nonspiking ones a graded synapse.
                # (Assumes j indexes the source, per stepAll's broadcasting
                # of Ulast along axis 1 - confirm.)
                if theta0[j] < sys.float_info.max:
                    GmaxSpk[i][j] = 1
                else:
                    GmaxNon[i][j] = 1
                tauSyn[i][j] = 2
    timeFactorSynapse = dt / tauSyn
    # --- Outputs (always at least one output node) ---
    numOutputs = int(perOut * numNeurons) or 1
    outputVoltageConnectivity = np.eye(numOutputs, numNeurons)
    outputSpikeConnectivity = np.copy(outputVoltageConnectivity)
    return (inputConnectivity, inputVals, Ulast, timeFactorMembrane, Gm, Ib, thetaLast, timeFactorThreshold, theta0, m,
            refCtr, refPeriod, GmaxNon, GmaxSpk, Gspike, timeFactorSynapse, DelE, outputVoltageConnectivity,
            outputSpikeConnectivity)
def constructNoRef(dt, numNeurons, perConn, perIn, perOut, perSpike, seed=0):
    """
    Build every parameter array for a network with no refractory period.

    :param dt: Simulation timestep (ms)
    :param numNeurons: Number of neurons in the network
    :param perConn: Probability that any given synapse exists
    :param perIn: Fraction of neurons that receive input
    :param perOut: Fraction of neurons routed to output nodes
    :param perSpike: Fraction of neurons which are spiking
    :param seed: Random seed
    :return: Tuple of all the parameters required to run a network
    """
    # Inputs (guarded to at least one, consistent with constructAll)
    numInputs = int(perIn*numNeurons)
    if numInputs == 0:
        numInputs = 1
    inputVals = np.zeros(numInputs)+1.0
    inputConnectivity = np.zeros([numNeurons,numInputs]) + 1
    # Construct neurons
    Ulast = np.zeros(numNeurons)
    numSpike = int(perSpike*numNeurons)
    Cm = np.zeros(numNeurons) + 5.0  # membrane capacitance (nF)
    Gm = np.zeros(numNeurons) + 1.0  # membrane conductance (uS)
    Ib = np.zeros(numNeurons) + 10.0  # bias current (nA)
    timeFactorMembrane = dt/Cm
    # Threshold stuff: first numSpike neurons can fire, the rest never do
    theta0 = np.zeros(numNeurons)
    for i in range(numNeurons):
        if i >= numSpike:
            theta0[i] = sys.float_info.max
        else:
            theta0[i] = 1.0
    thetaLast = np.copy(theta0)
    m = np.zeros(numNeurons)
    tauTheta = np.zeros(numNeurons)+1.0
    timeFactorThreshold = dt/tauTheta
    # Synapses
    GmaxNon = np.zeros([numNeurons,numNeurons])
    GmaxSpk = np.zeros([numNeurons,numNeurons])
    Gspike = np.zeros([numNeurons,numNeurons])
    DelE = np.zeros([numNeurons,numNeurons])
    tauSyn = np.zeros([numNeurons, numNeurons])+1
    np.random.seed(seed)
    for row in range(numNeurons):
        for col in range(numNeurons):
            rand = np.random.uniform()
            # BUG FIX: this previously compared against the undefined name
            # ``probConn`` (a NameError at runtime) - the parameter is
            # ``perConn``. Also removed the unused ``numSyn`` local.
            if rand < perConn:
                DelE[row][col] = 100
                if theta0[col] < sys.float_info.max:
                    GmaxSpk[row][col] = 1
                else:
                    GmaxNon[row][col] = 1
                tauSyn[row][col] = 2
    timeFactorSynapse = dt/tauSyn
    # Outputs (guarded to at least one, consistent with constructAll)
    numOutputs = int(perOut*numNeurons)
    if numOutputs == 0:
        numOutputs = 1
    outputVoltageConnectivity = np.zeros([numOutputs, numNeurons])
    for i in range(numOutputs):
        outputVoltageConnectivity[i][i] = 1
    outputSpikeConnectivity = np.copy(outputVoltageConnectivity)
    return (inputConnectivity, inputVals, Ulast, timeFactorMembrane, Gm, Ib, thetaLast, timeFactorThreshold, theta0, m,
            GmaxNon, GmaxSpk, Gspike, timeFactorSynapse, DelE, outputVoltageConnectivity, outputSpikeConnectivity)
def constructNoSpike(dt,numNeurons,perConn,perIn,perOut,seed=0):
"""
No spiking elements
:param dt: Simulation timestep (ms)
:param numNeurons: Number of neurons in the network
:param perConn: Percent of network which is connected
:param perIn: Percent of input nodes in the network
:param perOut: Percent of output nodes in the network
:param seed: Random seed
:return: All of the parameters required to run a network
"""
# Inputs
numInputs = int(perIn*numNeurons)
inputVals = np.zeros(numInputs)+1.0
inputConnectivity = np.zeros([numNeurons,numInputs]) + 1
# Construct neurons
Ulast = np.zeros(numNeurons)
Cm = np.zeros(numNeurons) + 5.0 # membrane capacitance (nF)
Gm = np.zeros(numNeurons) + 1.0 # membrane conductance (uS)
Ib = np.zeros(numNeurons) + 10.0 # bias current (nA)
timeFactorMembrane = dt/Cm
# Synapses
GmaxNon = np.zeros([numNeurons,numNeurons])
DelE = | np.zeros([numNeurons,numNeurons]) | numpy.zeros |
import hypothesis.extra.numpy as hnp
import numpy as np
from hypothesis import settings
from numpy.testing import assert_allclose
from mygrad.tensor_base import Tensor
from ..custom_strategies import adv_integer_index, basic_indices
from ..wrappers.uber import backprop_test_factory, fwdprop_test_factory
def test_getitem():
    """Indexing a Tensor yields views that back-propagate their coefficients."""
    x = Tensor([1, 2, 3])
    a, b, c = x
    f = 2 * a + 3 * b + 4 * c
    f.backward()

    assert f.data == 20
    for elem, value, coeff in zip((a, b, c), (1, 2, 3), (2, 3, 4)):
        assert elem.data == value
        assert_allclose(elem.grad, np.array(coeff))
    assert_allclose(x.grad, np.array([2, 3, 4]))
"""
Multi-View Partial Point Clouds
The data structure will be:
data
├── MVP_Train.h5
| ├── incomplete_pcds (62400, 2048, 3)
| ├── complete_pcds (2400, 2048, 3)
| └── labels (62400,)
├── MVP_Validation.h5
| ├── incomplete_pcds (41600, 2048, 3)
| ├── complete_pcds (1600, 2048, 3)
| └── labels (41600,)
└── MVP_Test.h5
├── incomplete_pcds (59800, 2048, 3)
└── labels (59800,)
See MVP_data_structure.md for details.
"""
import random
import torch
import numpy as np
import h5py
from pathlib import Path
from src.utils.project_root import PROJECT_ROOT
class MVP(torch.utils.data.Dataset):
    """
    Multi-View Partial (MVP) point-cloud dataset backed by a single HDF5 file.

    Each complete point cloud corresponds to 26 incomplete (partial) views -
    see the every-26th-label sampling and ``np.repeat(..., 26, ...)`` in
    ``read_dataset``.
    """
    def __init__(self,
                 dataset_type: str = "train",
                 pcd_type: str = "complete",
                 *,
                 root='data/mvp/'):
        """
        :param dataset_type: train/validation/test
        :param pcd_type: complete/incomplete
        :param root: dataset directory, relative to the project root
        """
        self.dataset_type = dataset_type
        self.pcd_type = pcd_type
        self.root = root
        # Resolve which HDF5 file this split reads from
        self.file_path = self.parsing_file_path()
        # Load the whole split into memory up front
        self.input_data, self.labels, self.ground_truth_data = self.read_dataset()
        self.len = self.input_data.shape[0]
    def parsing_file_path(self):
        """Return the HDF5 file path corresponding to ``self.dataset_type``."""
        file_path = PROJECT_ROOT / self.root
        if self.dataset_type == "train":
            file_path = file_path / "MVP_Train.h5"
        elif self.dataset_type == "validation":
            file_path = file_path / "MVP_Validation.h5"
        else:
            file_path = file_path / "MVP_Test.h5"
        return file_path
    def read_dataset(self):
        """
        Load ``(input_data, labels, ground_truth_data)`` from the HDF5 file.

        ``ground_truth_data`` is ``None`` except for incomplete
        train/validation data, where each complete cloud is repeated 26 times
        to align with its 26 partial views.
        """
        input_file = h5py.File(self.file_path, 'r')
        if self.dataset_type != "test":
            if self.pcd_type == "complete":
                input_data = np.array(input_file['complete_pcds'])
                # Labels are stored per incomplete view (26 per complete
                # cloud), so keep every 26th one for the complete clouds
                labels = np.array([input_file['labels'][i] for i in range(0, len(input_file['labels']), 26)])
                ground_truth_data = None
            else:  # pcd_type == "incomplete"
                input_data = np.array(input_file['incomplete_pcds'])
                labels = np.array(input_file['labels'])
                ground_truth_data = np.repeat(input_file['complete_pcds'], 26, axis=0)
        else:  # self.dataset_type == "test"
            input_data = np.array(input_file['incomplete_pcds'])
            labels = np.array(input_file['labels']).squeeze()
            ground_truth_data = None
        input_file.close()
        return input_data, labels, ground_truth_data
    def __len__(self):
        """Number of samples in this split."""
        return self.len
    def __getitem__(self, index):
        """
        Return ``(input_pcd, label, ground_truth_pcd)`` as torch tensors.

        ``ground_truth_pcd`` is a dummy 1-element tensor when this
        split/pcd_type has no ground truth.
        """
        input_data = torch.from_numpy(self.input_data[index])
        if self.ground_truth_data is not None:
            ground_truth = torch.from_numpy(self.ground_truth_data[index])
        else:
            ground_truth = torch.empty(1)
        label = torch.from_numpy(np.array(self.labels[index].astype('int64')))
        return input_data, label, ground_truth
class Partitioned_MVP(torch.utils.data.Dataset):
    def __init__(self,
                 dataset_type: str = "train",
                 pcd_type: str = "occluded",
                 *,
                 root='data/partitioned_mvp/'):
        """
        :param dataset_type: train/validation/test
        :param pcd_type: occluded (the only supported value)
        :param root: dataset directory, relative to the project root
        """
        self.dataset_type = dataset_type
        self.pcd_type = pcd_type
        self.root = root
        # Resolve which HDF5 file this split reads from
        self.file_path = self.parsing_file_path()
        # Load the whole split into memory up front
        self.input_data, self.labels, self.ground_truth_data = self.read_dataset()
        self.len = self.input_data.shape[0]
def parsing_file_path(self):
file_path = PROJECT_ROOT / self.root
if self.dataset_type == "train":
file_path = file_path / "Partitioned_MVP_Train.h5"
elif self.dataset_type == "validation":
file_path = file_path / "Partitioned_MVP_Validation.h5"
else:
file_path = file_path / "Partitioned_MVP_Test.h5"
return file_path
def read_dataset(self):
input_file = h5py.File(self.file_path, 'r')
if self.dataset_type != "test":
input_data = np.array(input_file['incomplete_pcds'])
labels = np.array(input_file['labels'])
ground_truth_data = np.array(input_file['complete_pcds'])
else: # self.dataset_type == "test"
input_data = | np.array(input_file['incomplete_pcds']) | numpy.array |
import numpy as np
import os
import csv
import physics as phys
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.pylab as pylab
import DataAnalysis as Data
import utils
import GenerationRate.BandToBandTunneling as BTB
from scipy.optimize import curve_fit
# Matplotlib global style: large fonts and a wide canvas for IV plots.
params = {'legend.fontsize': 'x-large',
          'figure.figsize': (20, 9.3),
          'axes.labelsize': 'x-large',
          'axes.titlesize':'x-large',
          'xtick.labelsize':'x-large',
          'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
plt.rcParams.update({'font.size': 9})
# Physical constants
kB = 1.38e-23  # Boltzmann constant [J/K]
me = 9.11e-31  # electron rest mass [kg]
e = 1.6e-19  # elementary charge [C]
eps_InP = 12.5 * 8.85e-14  # permittivity of InP [F/cm]
eps_InGaAs = 13.9 * 8.85e-14  # [F/cm] In 0.53 Ga 0.47 As
eps_InGaAsP = 13.436 * 8.85e-14  # [F/cm] Approximated by In 0.53 Ga 0.47 As 0.65 P 0.35
h_bar = 1.054e-34  # reduced Planck constant [J-s]
# Default trap level [eV] (relative to midgap) per material.
Eti = {'InP': -0.025, 'InGaAs': 0.16}
# Plotting parameters
count = 6
ColorSet10 = ['orangered', 'yellowgreen', 'goldenrod', 'darkviolet', 'darkorange',
              'brown', 'b', 'r', 'fuchsia', 'g']
LineSet2 = ['-', '-.']
ColorModel = {'SRH': 'r', 'TAT': 'b'}
class CurrentFitting(object):
    def __init__(self, RawIV, voltage_settings, temperature, mode, electric_field, doping, Lifetime,
                 effective_mass, structure, others, trap_finding):
        """Unpack the measurement/TCAD inputs and run the requested fits.

        Builds the analysed voltage grids, the curve_fit guesses/bounds for
        every (material, mechanism) pair, then fills ``self.result`` with
        FitIV / FitIT outputs for each method listed in ``mode``.
        """
        # Raw IV curves must be supplied, whether from TCAD or measurement.
        self.RawIV = RawIV
        # Temperature settings
        self.T_analysis, self.T_analysis_IT, self.T_min, self.T_max, self.T_analysis_v_max = temperature
        self.v_min, self.v_max, v_max_range, self.Vpt, self.V1, self.V2 = voltage_settings
        self.method, self.mechanism, self.material = mode
        location_electric_field, label_electric_field = electric_field
        self.Lifetime_p, self.Lifetime_n, self.Lifetime_ref = Lifetime
        location_doping, label_doping = doping
        self.epitaxy, self.interface_um, self.A = structure  # interface_um = [-3.62, -3.5, -0.5]
        self.ND, self.Ncharge, self.d_mul, self.d_ch, self.ND_abs, self.d_InGaAs = self.epitaxy
        self.effective_mass_InP = effective_mass['InP']
        self.effective_mass_InGaAs = effective_mass['InGaAs']
        self.RawLocation, self.I_InP_max, self.TCAD_IV, self.TCAD_lifetime, self.TCAD_check = others
        self.Eti, self.Eti_error = trap_finding
        # Build the analysed voltage grids (0.1 V steps, negative bias).
        v_step = 0.1
        iterations = (self.v_max['InGaAs'] - self.v_min['InP']) / v_step
        self.voltage = np.asarray([round(-self.v_min['InP'] - v_step * i, 1) for i in range(int(iterations))])
        self.V_InP = np.asarray([element for element in self.voltage
                                 if abs(self.v_min['InP']) <= abs(element) <= self.v_max['InP']])
        self.V_InGaAs = np.asarray([element for element in self.voltage
                                    if abs(self.v_min['InGaAs']) <= abs(element) <= self.v_max['InGaAs']])
        if v_max_range == 'All':
            # Pull each temperature's upper limit in slightly to avoid the edge.
            for T in self.T_analysis:
                self.T_analysis_v_max[T] = self.T_analysis_v_max[T] - 0.3
        elif v_max_range == 'Partial':
            self.T_analysis_v_max = {T: self.v_max['InGaAs'] for T in self.T_analysis}  #
        else:
            raise BaseException("Wrong InGaAs analysis range: %s" % v_max_range)
        # Build curve_fit guesses & bounds for the IV fits.
        def tolerance(material, trap_level, error):
            # Clamp the allowed trap-level window to +/- half the (scaled) bandgap.
            if material == 'InP':
                lower_bound = max(trap_level - 0.5 * error * phys.Eg_InP(300), - 0.5 * error * phys.Eg_InP(300))
                upper_bound = min(trap_level + 0.5 * error * phys.Eg_InP(300), 0.5 * error * phys.Eg_InP(300))
                return lower_bound, upper_bound
            elif material == 'InGaAs':
                lower_bound = max(trap_level - 0.5 * error * phys.Eg_InGaAs(300), - 0.5 * phys.Eg_InGaAs(300))
                upper_bound = min(trap_level + 0.5 * error * phys.Eg_InGaAs(300), 0.5 * phys.Eg_InGaAs(300))
                return lower_bound, upper_bound
            else:
                raise BaseException("Wrong material (InP/InGaAs): %s" % material)
        Bounds = {'InP': tolerance('InP', self.Eti['InP'], self.Eti_error['InP']),
                  'InGaAs': tolerance('InGaAs', self.Eti['InGaAs'], self.Eti_error['InGaAs'])}
        SRH_InP_guess_IV = {T: [self.Eti['InP'], 1, 1] for T in self.T_analysis}
        SRH_InP_bound_IV = {T: ([Bounds['InP'][0], 1, 1], [Bounds['InP'][1], 10, 10]) for T in self.T_analysis}
        SRH_InGaAs_guess_IV = {T: [self.Eti['InGaAs'], 1, 1] for T in self.T_analysis}
        SRH_InGaAs_bound_IV = {T: ([Bounds['InGaAs'][0], 0.1, 0.1], [Bounds['InGaAs'][1], 10, 10])
                               for T in self.T_analysis}
        TAT_InP_guess_IV = {T: [self.Eti['InP'], 1, 1] for T in self.T_analysis}
        TAT_InP_bound_IV = {T: ([Bounds['InP'][0], 1, 1], [Bounds['InP'][1], 1.5, 1.5])
                            for T in self.T_analysis}
        TAT_InGaAs_guess_IV = {T: [self.Eti['InGaAs'], 1, 1] for T in self.T_analysis}
        TAT_InGaAs_bound_IV = {T: ([Bounds['InGaAs'][0], 0.5, 0.85], [Bounds['InGaAs'][1], 1.5, 1.5])
                               for T in self.T_analysis}
        # Guesses & bounds for the IT fits (Eti, tp, tn, alpha_p, alpha_n).
        SRH_InP_guess_IT = {V: [self.Eti['InP'], 1, 1, 10, 1] for V in self.V_InP}
        SRH_InP_bound_IT = {V: ([Bounds['InP'][0], 1, 1, 0.1, 0.1], [Bounds['InP'][1], 3, 3, 10, 10])
                            for V in self.V_InP}
        SRH_InGaAs_guess_IT = {V: [self.Eti['InGaAs'], 1, 1, 5, 5] for V in self.V_InGaAs}
        SRH_InGaAs_bound_IT = {V: ([Bounds['InGaAs'][0], 1e-1, 1, 0, 0], [Bounds['InGaAs'][1], 1, 10, 8, 8])
                               for V in self.V_InGaAs}
        # NOTE(review): the TAT IT guesses read the module-level Eti dict, not
        # self.Eti like the entries above — confirm this asymmetry is intended.
        TAT_InP_guess_IT = {V: [Eti['InP'], 1, 1, 4, 4] for V in self.V_InP}
        TAT_InP_bound_IT = {V: ([- phys.Eg_InP(300) / 2, 0.8, 0.8, 1, 1], [phys.Eg_InP(300) / 2, 1.5, 1.5, 8, 8]) for V in self.V_InP}
        TAT_InGaAs_guess_IT = {V: [Eti['InGaAs'], 1, 1, 5, 5] for V in self.V_InGaAs}
        TAT_InGaAs_bound_IT = {V: ([-phys.Eg_InGaAs(300) / 2, 1e-1, 1, 0, 0],
                                   [phys.Eg_InGaAs(300) / 2, 1, 10, 8, 8]) for V in self.V_InGaAs}
        self.guess = {'InP': {'SRH': {'IV': SRH_InP_guess_IV, 'IT': SRH_InP_guess_IT},
                              'TAT': {'IV': TAT_InP_guess_IV, 'IT': TAT_InP_guess_IT}},
                      'InGaAs': {'SRH': {'IV': SRH_InGaAs_guess_IV, 'IT': SRH_InGaAs_guess_IT},
                                 'TAT': {'IV': TAT_InGaAs_guess_IV, 'IT': TAT_InGaAs_guess_IT}}}
        self.bound = {'InP': {'SRH': {'IV': SRH_InP_bound_IV, 'IT': SRH_InP_bound_IT},
                              'TAT': {'IV': TAT_InP_bound_IV, 'IT': TAT_InP_bound_IT}},
                      'InGaAs': {'SRH': {'IV': SRH_InGaAs_bound_IV, 'IT': SRH_InGaAs_bound_IT},
                                 'TAT': {'IV': TAT_InGaAs_bound_IV, 'IT': TAT_InGaAs_bound_IT}}}
        # Read max electric field vs bias distributions for InP & InGaAs.
        self.Ef_InP = Data.CSV(location_electric_field['InP'],
                               label_electric_field['InP'], label_electric_field['InP'])
        self.Ef_InGaAs = Data.CSV(location_electric_field['InGaAs'],
                                  label_electric_field['InGaAs'], label_electric_field['InGaAs'])
        self.DopingProfile = Data.DopingProfile(location_doping, label_doping, label_doping)
        # Per-material voltage grids and 1/|V| fitting weights.
        self.material_voltage = {'InP': self.V_InP, 'InGaAs': self.V_InGaAs}
        self.weight = {'InP': 1 / abs(self.V_InP), 'InGaAs': 1 / abs(self.V_InGaAs)}
        self.result = dict()
        # Run the requested fits; the inner comprehensions rebind `item` to the
        # material (comprehension scope), independent of the loop's `item`.
        for item in self.method:
            if item == 'IV':
                self.result['IV'] = {item: {model: {T: self.FitIV(T, item, model, self.guess[item][model]['IV'][T],
                                                                  self.bound[item][model]['IV'][T], fitsigma=1.5)
                                                    for T in self.T_analysis} for model in self.mechanism}
                                     for item in self.material}
                self.Lifetime = {item: {model: {T: self.result['IV'][item][model][T][2] for T in self.T_analysis}
                                        for model in self.mechanism} for item in self.material}
                self.Lifetime['InGaAsP'] = {model: {T: [self.Lifetime_p['InGaAsP'], self.Lifetime_n['InGaAsP']]
                                                    for T in self.T_analysis} for model in self.mechanism}
            if item == 'IT':
                self.result['IT'] = {item: {model: {V: self.FitIT(V, item, model, self.guess[item][model]['IT'][V],
                                                                  self.bound[item][model]['IT'][V], fitsigma=1)
                                                    for V in self.material_voltage[item]} for model in self.mechanism}
                                     for item in self.material}
        '''
        self.BTB = {item: {T: self.PlotIV(T, item, 'BTB', ['All', self.effective_mass_InP]) for T in self.T_analysis} for
                    item in self.material}
        '''
    def read_data(self, temperature):
        """Return the raw IV curve measured at the given temperature [K]."""
        return self.RawIV[temperature]
    def read_result(self):
        """Return the dict of fitted IV/IT results built in __init__."""
        return self.result
def room_temperature(self):
min = 1e4
RT = None
for T in self.T_analysis:
if abs(300 - T) < min:
min = abs(300 - T)
RT = T
return RT
def dm_InP(self, E_Vcm, ND, ND_c, d_mul, d_charge):
d = E_Vcm * eps_InP / (e * ND) # [cm]
if type(d) is np.ndarray:
dm_list = []
for i, x in enumerate(d):
if x <= d_mul:
dm_list.append(x)
else:
E2 = E_Vcm[i] - (e * ND * d_mul) / eps_InP
d2 = E2 * eps_InP / (e * ND_c)
if d2 <= d_charge:
dm_list.append(d_mul + d2)
else:
dm_list.append(d_mul + d_charge)
return np.asarray(dm_list) # [cm]
else:
if d <= d_mul:
return d # [cm]
else:
E2 = E_Vcm - (e * ND * d_mul) / eps_InP
d2 = E2 * eps_InP / (e * ND_c)
if d2 <= d_charge:
return d_mul + d2 # [cm]
else:
return d_mul + d_charge # [cm]
def dm_InGaAs(self, E, ND_abs, d_abs):
d = E * eps_InGaAs / (e * ND_abs)
if type(d) is np.ndarray:
dm_list = []
for x in d:
if x <= d_abs:
dm_list.append(x)
else:
dm_list.append(d_abs)
return np.asarray(dm_list)
else:
if d <= d_abs:
return d
else:
return d_abs
    def Em_InP(self, V):
        # Interpolate the InP peak-field curve at bias V (sign-normalized to negative).
        return utils.find(self.Ef_InP.X, self.Ef_InP.Y, -abs(V), 'linear')
    def Em_InGaAs(self, V):
        # Interpolate the InGaAs peak-field curve at bias V (sign-normalized to negative).
        return utils.find(self.Ef_InGaAs.X, self.Ef_InGaAs.Y, -abs(V), 'linear')
    def FitIV(self, T, material, type, guess, bound, fitsigma):
        """Fit the dark IV curve at one temperature for one junction.

        :param T: temperature [K]
        :param material: 'InP' or 'InGaAs' — which junction dominates the current
        :param type: generation model, 'SRH' or 'TAT' (shadows the builtin `type`)
        :param guess: initial parameters [Eti, tp, tn] for curve_fit
        :param bound: (lower, upper) parameter bounds for curve_fit
        :param fitsigma: exponent applied to |log10(I)| to weight the fit
        :return: V, I(fitted), [tau_p, tau_n], Eti (TAT also returns mt)
        """
        if material == 'InP':
            V_InP = np.asarray([V for V in self.RawIV[T].X if -self.v_min['InP'] >= V > -self.v_max['InP']])
            F_InP = np.asarray([self.Em_InP(V) for V in V_InP])
            I_InP = np.asarray([abs(I) for i, I in enumerate(self.RawIV[T].Y) if self.RawIV[T].X[i] in V_InP])
            def lifetime(tp, tn):
                # Scale the reference lifetimes by the fitted multipliers and T^1.5.
                alpha = 1.5
                tau_p0 = self.Lifetime_p['InP'] * 1e-9  # [s]
                tau_n0 = self.Lifetime_n['InP'] * 1e-9  # [s]
                tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
                tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
                return tau_p, tau_n
            if type == 'TAT':
                def TAT_InP_IV(X, Eti, tp, tn):
                    # Trap-assisted tunneling model: SRH generation enhanced by
                    # field-dependent Gamma terms in each depleted layer.
                    Emax_Vcm, T = X
                    alpha = 1.5
                    # tp = 1
                    # tn = 0.1
                    mt = self.effective_mass_InP
                    prefactor = 1
                    me = 9.11e-31
                    Nc300 = 5.716e17  # [cm-3]
                    Nv300 = 1.143e19  # [cm-3]
                    tau_p0 = self.Lifetime_p['InP'] * 1e-9  # [s]
                    tau_n0 = self.Lifetime_n['InP'] * 1e-9  # [s]
                    tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
                    tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
                    ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InP(T) / (2 * kB * T))
                    G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n)))
                    dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch)  # 0.42e-4 # [cm]
                    F_Gamma = np.sqrt(24 * (mt * me) * (kB * T) ** 3) / (e * h_bar) / 100  # [V/cm]
                    E1 = Emax_Vcm
                    log10_Current = []
                    for i, x in enumerate(dM):
                        if x <= self.d_mul:
                            E2 = E1[i] - (e * self.ND * x) / eps_InP
                            # NOTE(review): the second exponent is E2 / F_Gamma ** 2,
                            # not (E2 / F_Gamma) ** 2 as in the first term — confirm
                            # whether the missing parentheses are intentional.
                            d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \
                                        (np.exp((E1[i] / F_Gamma) ** 2) - np.exp(E2 / F_Gamma ** 2))  # [cm]
                            log10_Current.append(
                                np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(x + d_Gamma_1))
                        else:
                            E2 = E1[i] - (e * self.ND * self.d_mul) / eps_InP
                            E3 = E2 - (e * self.Ncharge * (x - self.d_mul)) / eps_InP
                            d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \
                                        (np.exp((E1[i] / F_Gamma) ** 2) - np.exp(E2 / F_Gamma ** 2))  # [cm]
                            d_Gamma_2 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.Ncharge) * \
                                        (np.exp((E2 / F_Gamma) ** 2) - np.exp(E3 / F_Gamma ** 2))  # [cm]
                            log10_Current.append(
                                np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(
                                    x + d_Gamma_1 + d_Gamma_2))
                    return np.asarray(log10_Current)
                TAT_InP_popt, TAT_InP_pcov = curve_fit(TAT_InP_IV, (F_InP, T), np.log10(I_InP), p0=guess, bounds=bound,
                                                       sigma=abs(np.log10(I_InP)) ** fitsigma)
                print('[TAT] InP (%.0fK) Eti: %.3f, tp: %.3e, tn: %.3e' %
                      (T, TAT_InP_popt[0], TAT_InP_popt[1], TAT_InP_popt[2]))
                Eti = TAT_InP_popt[0]
                mt = self.effective_mass_InP
                tau_p, tau_n = lifetime(TAT_InP_popt[1], TAT_InP_popt[2])
                return V_InP, 10 ** TAT_InP_IV((F_InP, T), *TAT_InP_popt), [tau_p, tau_n], Eti, mt
            elif type == 'SRH':
                def SRH_InP(X, Eti, tp, tn):
                    """
                    SRH generation current using -U ~ ni / cosh(Eti/kT + ln(tp/tn)/2)
                    directly, without requiring the |Eti| >> kT approximation.
                    Temperature-scaled lifetimes are built in.
                    :param X: (Emax_Vcm, T)
                    :param Eti: trap level [eV]
                    :return: np.log10(I)
                    """
                    Emax_Vcm, T = X
                    alpha = 1.5  # 1
                    # tp = 1 # 0.1
                    # tn = 1 # 0.226
                    prefactor = 1
                    me = 9.11e-31
                    Nc300 = 5.716e17  # [cm-3]
                    Nv300 = 1.143e19  # [cm-3]
                    tau_p0 = self.Lifetime_p['InP'] * 1e-9  # [s]
                    tau_n0 = self.Lifetime_n['InP'] * 1e-9  # [s]
                    tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
                    tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
                    ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(- e * phys.Eg_InP(T) / (2 * kB * T))
                    G_SRH = ni / (
                            2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n)))
                    dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch)  # [cm]
                    return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM)
                popt_SRH_InP, pcov_SRH_InP = curve_fit(SRH_InP, (F_InP, T), np.log10(I_InP), p0=guess, bounds=bound,
                                                       sigma=abs(np.log10(I_InP)) ** fitsigma)
                print('[SRH] InP (%.0fK) Eti: %.3f, tp: %.3e, tn: %.3e' %
                      (T, popt_SRH_InP[0], popt_SRH_InP[1], popt_SRH_InP[2]))
                Eti = popt_SRH_InP[0]
                mt = self.effective_mass_InP
                tau_p, tau_n = lifetime(popt_SRH_InP[1], popt_SRH_InP[2])
                return V_InP, 10 ** SRH_InP((F_InP, T), *popt_SRH_InP), [tau_p, tau_n], Eti, mt
            else:
                raise BaseException("Wrong type: %s" % type)
        elif material == 'InGaAs':
            V_InGaAs = np.asarray([V for V in self.RawIV[T].X
                                   if -self.T_analysis_v_max[T] <= V <= -self.v_min['InGaAs']])
            F_InGaAs = np.asarray([self.Em_InGaAs(V) for V in V_InGaAs])
            # Subtract the saturated InP contribution before fitting InGaAs.
            I_InGaAs = np.asarray([abs(I) - self.I_InP_max for i, I in enumerate(self.RawIV[T].Y)
                                   if self.RawIV[T].X[i] in V_InGaAs])
            # check negative current
            for current in I_InGaAs:
                if current < 0:
                    raise BaseException("please decrease the I(InP) maximum: %s" % self.I_InP_max)
            def lifetime(tp, tn):
                # Scale the reference lifetimes by the fitted multipliers and T^1.5.
                alpha = 1.5
                tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9  # [s]
                tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9  # [s]
                tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
                tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
                return tau_p, tau_n
            if type == 'TAT':
                def TAT_InGaAs_IV(X, Eti, tp, tn):
                    # TAT in the fully-depleted absorber: single Gamma term.
                    Emax_Vcm, T = X
                    prefactor = 1
                    # tp = 1
                    # tn = 1
                    mt = self.effective_mass_InGaAs
                    alpha = 1.5
                    me = 9.11e-31
                    Nc300 = 2.53956e17  # [cm-3]
                    Nv300 = 7.51e18  # [cm-3]
                    tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9  # [s]
                    tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9  # [s]
                    tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
                    tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
                    ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InGaAs(T) / (2 * kB * T))
                    G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n)))
                    dM = self.dm_InGaAs(Emax_Vcm, self.ND_abs, self.d_InGaAs)  # [cm]
                    F_Gamma = np.sqrt(24 * (mt * me) * (kB * T) ** 3) / (e * h_bar) / 100  # [V/cm]
                    E1 = Emax_Vcm
                    E2 = 0
                    d_Gamma = (np.sqrt(3 * np.pi) * eps_InGaAs * F_Gamma) / (e * self.ND_abs) * \
                              (np.exp((E1 / F_Gamma) ** 2) - np.exp((E2 / F_Gamma) ** 2))  # [cm]
                    return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM + d_Gamma)
                if len(V_InGaAs) == 0:
                    # No voltages in range at this temperature: empty result.
                    return V_InGaAs, [], [0, 0], None, None
                else:
                    TAT_InGaAs_popt, TAT_InGaAs_pcov = curve_fit(TAT_InGaAs_IV, (F_InGaAs, T), np.log10(I_InGaAs),
                                                                 p0=guess,
                                                                 bounds=bound,
                                                                 sigma=abs(np.log10(I_InGaAs)) ** fitsigma)
                    print('[TAT] InGaAs (%.0fK) Eti: %.3f, tp: %.3e, tn: %.3e' %
                          (T, TAT_InGaAs_popt[0], TAT_InGaAs_popt[1], TAT_InGaAs_popt[2]))
                    Eti = TAT_InGaAs_popt[0]
                    mt = self.effective_mass_InGaAs
                    tau_p, tau_n = lifetime(TAT_InGaAs_popt[1], TAT_InGaAs_popt[2])
                    # Add the InP floor back so the curve overlays the raw IV.
                    return V_InGaAs, 10 ** TAT_InGaAs_IV((F_InGaAs, T), *TAT_InGaAs_popt) + \
                           np.ones(len(V_InGaAs)) * self.I_InP_max, [tau_p, tau_n], Eti, mt
            elif type == 'SRH':
                def SRH_InGaAs_IV(X, Eti, tp, tn):
                    Emax_Vcm, T = X
                    prefactor = 1
                    # tp = 1
                    # tn = 1
                    alpha = 1.5
                    me = 9.11e-31
                    Nc300 = 2.53956e17  # [cm-3]
                    Nv300 = 7.51e18  # [cm-3]
                    tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9  # [s]
                    tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9  # [s]
                    tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha
                    tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha
                    # NOTE(review): hard-coded doping/thickness here, unlike the
                    # TAT branch which uses self.ND_abs / self.d_InGaAs — confirm.
                    ND_abs = 7.5e14  # [cm-3]
                    d_InGaAs = 3e-4  # [cm]
                    ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InGaAs(T) / (2 * kB * T))
                    G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n)))
                    dM = self.dm_InGaAs(Emax_Vcm, ND_abs, d_InGaAs)  # [cm]
                    return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM)
                if len(V_InGaAs) == 0:
                    # No voltages in range; note this branch returns no mt.
                    return V_InGaAs, [], [0, 0], None
                else:
                    SRH_InGaAs_popt, SRH_InGaAs_pcov = curve_fit(SRH_InGaAs_IV, (F_InGaAs, T), np.log10(I_InGaAs),
                                                                 p0=guess, bounds=bound,
                                                                 sigma=abs(np.log10(I_InGaAs)) ** fitsigma)
                    print('[SRH] InGaAs (%.0fK) Eti: %.3f, tp: %.3e, tn: %.3e' %
                          (T, SRH_InGaAs_popt[0], SRH_InGaAs_popt[1], SRH_InGaAs_popt[2]))
                    Eti = SRH_InGaAs_popt[0]
                    tau_p, tau_n = lifetime(SRH_InGaAs_popt[1], SRH_InGaAs_popt[2])
                    return V_InGaAs, 10 ** SRH_InGaAs_IV((F_InGaAs, T), *SRH_InGaAs_popt) + \
                           np.ones(len(V_InGaAs)) * self.I_InP_max, [tau_p, tau_n], Eti
            else:
                raise BaseException("Wrong type: %s" % type)
        else:
            raise BaseException("Wrong material: %s" % material)
def FitIT(self, V, material, type, guess, bound, fitsigma):
if material == 'InP':
I_InP = np.asarray([utils.find(self.RawIV[T].X, abs(self.RawIV[T].Y), V, 'log') for T in self.T_analysis_IT])
if type == 'TAT':
def TAT_InP_IT(X, Eti, tp, tn, alpha_p, alpha_n):
T, Emax_Vcm = X
mt = self.effective_mass_InP
prefactor = 1
me = 9.11e-31
Nc300 = 5.716e17 # [cm-3]
Nv300 = 1.143e19 # [cm-3]
tau_p0 = self.Lifetime_p['InP'] * 1e-9 # [s]
tau_n0 = self.Lifetime_n['InP'] * 1e-9 # [s]
tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha_p
tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha_n
ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InP(T) / (2 * kB * T))
G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n)))
dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch) # 0.42e-4 # [cm]
F_Gamma = np.sqrt(24 * (mt * me) * (kB * T) ** 3) / (e * h_bar) / 100 # [V/cm]
E1 = Emax_Vcm
if dM <= self.d_mul:
E2 = E1 - (e * self.ND * dM) / eps_InP
d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \
(np.exp((E1 / F_Gamma) ** 2) - np.exp(E2 / F_Gamma ** 2)) # [cm]
return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM + d_Gamma_1)
else:
E2 = E1 - (e * self.ND * self.d_mul) / eps_InP
E3 = E2 - (e * self.Ncharge * (dM - self.d_mul)) / eps_InP
d_Gamma_1 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.ND) * \
(np.exp((E1 / F_Gamma) ** 2) - np.exp(E2 / F_Gamma ** 2)) # [cm]
d_Gamma_2 = (np.sqrt(3 * np.pi) * eps_InP * F_Gamma) / (e * self.Ncharge) * \
(np.exp((E2 / F_Gamma) ** 2) - np.exp(E3 / F_Gamma ** 2)) # [cm]
return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM + d_Gamma_1 + d_Gamma_2)
popt, pcov = curve_fit(TAT_InP_IT, (self.T_analysis_IT, self.Em_InP(V)), np.log10(I_InP), p0=guess,
bounds=bound, sigma=abs(np.log10(I_InP)) ** fitsigma)
Eti, tp, tn, alpha_p, alpha_n = popt
print('[TAT] InP (%.1f) Eti: %.3f, tp: %.3e, tn: %.3e, alpha(p): %.3e, alpha(n): %.3e' %
(V, Eti, tp, tn, alpha_p, alpha_n))
return self.T_analysis_IT, 10 ** TAT_InP_IT((self.T_analysis_IT, self.Em_InP(V)), *popt), \
Eti, [tp, tn, alpha_p, alpha_n]
elif type == 'SRH':
def SRH_InP_IT(X, Eti, tp, tn, alpha_n, alpha_p):
T, Emax_Vcm = X
# tp = 1
# tn = 1
prefactor = 1
Nc300 = 5.716e17 # [cm-3]
Nv300 = 1.143e19 # [cm-3]
tau_p0 = self.Lifetime_p['InP'] * 1e-9 # [s]
tau_n0 = self.Lifetime_n['InP'] * 1e-9 # [s]
tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha_p
tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha_n
ni = np.sqrt(Nc300 * Nv300) * (T / self.room_temperature()) ** 1.5 * np.exp(-e * phys.Eg_InP(T) / (2 * kB * T))
G_SRH = ni / (2 * np.sqrt(tau_p * tau_n) * np.cosh(e * Eti / (kB * T) + 0.5 * np.log(tau_p / tau_n)))
dM = self.dm_InP(Emax_Vcm, self.ND, self.Ncharge, self.d_mul, self.d_ch) # 0.42e-4 # [cm]
return np.log10(self.A * e) + np.log10(prefactor * G_SRH) + np.log10(dM)
popt, pcov = curve_fit(SRH_InP_IT, (self.T_analysis_IT, self.Em_InP(V)), np.log10(I_InP), p0=guess,
bounds=bound, sigma=abs(np.log10(I_InP)) ** fitsigma)
Eti, tp, tn, alpha_p, alpha_n = popt
print('[SRH] InP (%.1f) Eti: %.3f, tp: %.3e, tn: %.3e, alpha(p): %.3e, alpha(n): %.3e' %
(V, Eti, tp, tn, alpha_p, alpha_n))
return self.T_analysis_IT, 10 ** SRH_InP_IT((self.T_analysis_IT, self.Em_InP(V)), *popt), \
Eti, [tp, tn, alpha_p, alpha_n]
else:
raise BaseException("Wrong type: %s" % type)
elif material == 'InGaAs':
I_InGaAs = np.asarray([utils.find(self.RawIV[T].X, abs(self.RawIV[T].Y) - self.I_InP_max, V, 'log') for T in self.T_analysis_IT])
# check I(InGaAs)
for current in I_InGaAs:
if current < 0:
raise BaseException("please decrease the I(InP) maximum: %s" % self.I_InP_max)
# 檢查電流是否隨著溫度遞增
if abs(V) > abs(self.T_analysis_v_max[self.T_analysis_IT[0]]):
raise BaseException("Voltage is too large: %s > Vmax(InGaAs,240K) = %s" %
(abs(V), abs(self.T_analysis_v_max[self.T_analysis_IT[0]])))
if type == 'TAT':
def TAT_InGaAs_IT(X, Eti, tp, tn, alpha_p, alpha_n):
T, Emax_Vcm = X
prefactor = 1
mt = self.effective_mass_InGaAs
me = 9.11e-31
Nc300 = 2.53956e17 # [cm-3]
Nv300 = 7.51e18 # [cm-3]
tau_p0 = self.Lifetime_p['InGaAs'] * 1e-9 # [s]
tau_n0 = self.Lifetime_n['InGaAs'] * 1e-9 # [s]
tau_p = tp * tau_p0 * (T / self.room_temperature()) ** alpha_p
tau_n = tn * tau_n0 * (T / self.room_temperature()) ** alpha_n
ND_abs = 3.53e14 # [cm-3]
d_InGaAs = 3e-4 # [cm]
ni = | np.sqrt(Nc300 * Nv300) | numpy.sqrt |
"""This file contains code used in "Think Bayes",
by <NAME>, available from greenteapress.com
Copyright 2012 <NAME>
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import matplotlib.pyplot as pyplot
import thinkplot
import numpy
import csv
import random
import shelve
import sys
import time
import thinkbayes2
import warnings
warnings.simplefilter('error', RuntimeWarning)
FORMATS = ['pdf', 'eps', 'png']
class Locker(object):
    """Encapsulates a shelf for storing key-value pairs."""

    def __init__(self, shelf_file):
        self.shelf = shelve.open(shelf_file)

    def Close(self):
        """Closes the shelf.
        """
        self.shelf.close()

    def Add(self, key, value):
        """Adds a key-value pair."""
        self.shelf[str(key)] = value

    def Lookup(self, key):
        """Looks up a key; returns None if absent."""
        return self.shelf.get(str(key))

    def Keys(self):
        """Returns an iterator of keys."""
        # Shelf has no iterkeys() on Python 3; iter(keys()) preserves the
        # original iterator semantics on both Python versions.
        return iter(self.shelf.keys())

    def Read(self):
        """Returns the contents of the shelf as a map."""
        return dict(self.shelf)
class Subject(object):
"""Represents a subject from the belly button study."""
def __init__(self, code):
"""
code: string ID
species: sequence of (int count, string species) pairs
"""
self.code = code
self.species = []
self.suite = None
self.num_reads = None
self.num_species = None
self.total_reads = None
self.total_species = None
self.prev_unseen = None
self.pmf_n = None
self.pmf_q = None
self.pmf_l = None
def Add(self, species, count):
"""Add a species-count pair.
It is up to the caller to ensure that species names are unique.
species: string species/genus name
count: int number of individuals
"""
self.species.append((count, species))
def Done(self, reverse=False, clean_param=0):
"""Called when we are done adding species counts.
reverse: which order to sort in
"""
if clean_param:
self.Clean(clean_param)
self.species.sort(reverse=reverse)
counts = self.GetCounts()
self.num_species = len(counts)
self.num_reads = sum(counts)
def Clean(self, clean_param=50):
"""Identifies and removes bogus data.
clean_param: parameter that controls the number of legit species
"""
def prob_bogus(k, r):
"""Compute the probability that a species is bogus."""
q = clean_param / r
p = (1-q) ** k
return p
print(self.code, clean_param)
counts = self.GetCounts()
r = 1.0 * sum(counts)
species_seq = []
for k, species in sorted(self.species):
if random.random() < prob_bogus(k, r):
continue
species_seq.append((k, species))
self.species = species_seq
def GetM(self):
"""Gets number of observed species."""
return len(self.species)
def GetCounts(self):
"""Gets the list of species counts
Should be in increasing order, if Sort() has been invoked.
"""
return [count for count, _ in self.species]
def MakeCdf(self):
"""Makes a CDF of total prevalence vs rank."""
counts = self.GetCounts()
counts.sort(reverse=True)
cdf = thinkbayes2.Cdf(dict(enumerate(counts)))
return cdf
def GetNames(self):
"""Gets the names of the seen species."""
return [name for _, name in self.species]
def PrintCounts(self):
"""Prints the counts and species names."""
for count, name in reversed(self.species):
print(count, name)
def GetSpecies(self, index):
"""Gets the count and name of the indicated species.
Returns: count-species pair
"""
return self.species[index]
def GetCdf(self):
"""Returns cumulative prevalence vs number of species.
"""
counts = self.GetCounts()
items = enumerate(counts)
cdf = thinkbayes2.Cdf(items)
return cdf
def GetPrevalences(self):
"""Returns a sequence of prevalences (normalized counts).
"""
counts = self.GetCounts()
total = sum(counts)
prevalences = numpy.array(counts, dtype=numpy.float) / total
return prevalences
def Process(self, low=None, high=500, conc=1, iters=100):
"""Computes the posterior distribution of n and the prevalences.
Sets attribute: self.suite
low: minimum number of species
high: maximum number of species
conc: concentration parameter
iters: number of iterations to use in the estimator
"""
counts = self.GetCounts()
m = len(counts)
if low is None:
low = max(m, 2)
ns = range(low, high+1)
#start = time.time()
self.suite = Species5(ns, conc=conc, iters=iters)
self.suite.Update(counts)
#end = time.time()
#print 'Processing time' end-start
def MakePrediction(self, num_sims=100):
"""Make predictions for the given subject.
Precondition: Process has run
num_sims: how many simulations to run for predictions
Adds attributes
pmf_l: predictive distribution of additional species
"""
add_reads = self.total_reads - self.num_reads
curves = self.RunSimulations(num_sims, add_reads)
self.pmf_l = self.MakePredictive(curves)
def MakeQuickPrediction(self, num_sims=100):
"""Make predictions for the given subject.
Precondition: Process has run
num_sims: how many simulations to run for predictions
Adds attribute:
pmf_l: predictive distribution of additional species
"""
add_reads = self.total_reads - self.num_reads
pmf = thinkbayes2.Pmf()
_, seen = self.GetSeenSpecies()
for _ in range(num_sims):
_, observations = self.GenerateObservations(add_reads)
all_seen = seen.union(observations)
l = len(all_seen) - len(seen)
pmf.Incr(l)
pmf.Normalize()
self.pmf_l = pmf
def DistL(self):
"""Returns the distribution of additional species, l.
"""
return self.pmf_l
def MakeFigures(self):
"""Makes figures showing distribution of n and the prevalences."""
self.PlotDistN()
self.PlotPrevalences()
def PlotDistN(self):
"""Plots distribution of n."""
pmf = self.suite.DistN()
print('90% CI for N:', pmf.CredibleInterval(90))
pmf.label = self.code
thinkplot.Clf()
thinkplot.PrePlot(num=1)
thinkplot.Pmf(pmf)
root = 'species-ndist-%s' % self.code
thinkplot.Save(root=root,
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def PlotPrevalences(self, num=5):
"""Plots dist of prevalence for several species.
num: how many species (starting with the highest prevalence)
"""
thinkplot.Clf()
thinkplot.PrePlot(num=5)
for rank in range(1, num+1):
self.PlotPrevalence(rank)
root = 'species-prev-%s' % self.code
thinkplot.Save(root=root,
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
axis=[0, 0.3, 0, 1],
)
def PlotPrevalence(self, rank=1, cdf_flag=True):
"""Plots dist of prevalence for one species.
rank: rank order of the species to plot.
cdf_flag: whether to plot the CDF
"""
# convert rank to index
index = self.GetM() - rank
_, mix = self.suite.DistOfPrevalence(index)
count, _ = self.GetSpecies(index)
mix.label = '%d (%d)' % (rank, count)
print('90%% CI for prevalence of species %d:' % rank, end=' ')
print(mix.CredibleInterval(90))
if cdf_flag:
cdf = mix.MakeCdf()
thinkplot.Cdf(cdf)
else:
thinkplot.Pmf(mix)
def PlotMixture(self, rank=1):
"""Plots dist of prevalence for all n, and the mix.
rank: rank order of the species to plot
"""
# convert rank to index
index = self.GetM() - rank
print(self.GetSpecies(index))
print(self.GetCounts()[index])
metapmf, mix = self.suite.DistOfPrevalence(index)
thinkplot.Clf()
for pmf in metapmf.Values():
thinkplot.Pmf(pmf, color='blue', alpha=0.2, linewidth=0.5)
thinkplot.Pmf(mix, color='blue', alpha=0.9, linewidth=2)
root = 'species-mix-%s' % self.code
thinkplot.Save(root=root,
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
axis=[0, 0.3, 0, 0.3],
legend=False)
def GetSeenSpecies(self):
"""Makes a set of the names of seen species.
Returns: number of species, set of string species names
"""
names = self.GetNames()
m = len(names)
seen = set(SpeciesGenerator(names, m))
return m, seen
def GenerateObservations(self, num_reads):
"""Generates a series of random observations.
num_reads: number of reads to generate
Returns: number of species, sequence of string species names
"""
n, prevalences = self.suite.SamplePosterior()
names = self.GetNames()
name_iter = SpeciesGenerator(names, n)
items = zip(name_iter, prevalences)
cdf = thinkbayes2.Cdf(dict(items))
observations = cdf.Sample(num_reads)
#for ob in observations:
# print ob
return n, observations
def Resample(self, num_reads):
"""Choose a random subset of the data (without replacement).
num_reads: number of reads in the subset
"""
t = []
for count, species in self.species:
t.extend([species]*count)
random.shuffle(t)
reads = t[:num_reads]
subject = Subject(self.code)
hist = thinkbayes2.Hist(reads)
for species, count in hist.Items():
subject.Add(species, count)
subject.Done()
return subject
def Match(self, match):
"""Match up a rarefied subject with a complete subject.
match: complete Subject
Assigns attributes:
total_reads:
total_species:
prev_unseen:
"""
self.total_reads = match.num_reads
self.total_species = match.num_species
# compute the prevalence of unseen species (at least approximately,
# based on all species counts in match
_, seen = self.GetSeenSpecies()
seen_total = 0.0
unseen_total = 0.0
for count, species in match.species:
if species in seen:
seen_total += count
else:
unseen_total += count
self.prev_unseen = unseen_total / (seen_total + unseen_total)
def RunSimulation(self, num_reads, frac_flag=False, jitter=0.01):
"""Simulates additional observations and returns a rarefaction curve.
k is the number of additional observations
num_new is the number of new species seen
num_reads: how many new reads to simulate
frac_flag: whether to convert to fraction of species seen
jitter: size of jitter added if frac_flag is true
Returns: list of (k, num_new) pairs
"""
m, seen = self.GetSeenSpecies()
n, observations = self.GenerateObservations(num_reads)
curve = []
for i, obs in enumerate(observations):
seen.add(obs)
if frac_flag:
frac_seen = len(seen) / float(n)
frac_seen += random.uniform(-jitter, jitter)
curve.append((i+1, frac_seen))
else:
num_new = len(seen) - m
curve.append((i+1, num_new))
return curve
def RunSimulations(self, num_sims, num_reads, frac_flag=False):
"""Runs simulations and returns a list of curves.
Each curve is a sequence of (k, num_new) pairs.
num_sims: how many simulations to run
num_reads: how many samples to generate in each simulation
frac_flag: whether to convert num_new to fraction of total
"""
curves = [self.RunSimulation(num_reads, frac_flag)
for _ in range(num_sims)]
return curves
def MakePredictive(self, curves):
    """Build the predictive distribution of additional species.

    curves: list of (k, num_new) curves

    Returns: Pmf of num_new after the final simulated read
    """
    pred = thinkbayes2.Pmf(label=self.code)

    # The last point of each curve is the total number of new species
    # seen in that simulation.
    finals = [curve[-1][1] for curve in curves]
    for num_new in finals:
        pred.Incr(num_new)

    pred.Normalize()
    return pred
def MakeConditionals(curves, ks):
    """Make Cdfs of num_new conditioned on each value of k.

    curves: list of (k, num_new) curves
    ks: list of values of k

    Returns: list of Cdfs, one per value of k
    """
    joint = MakeJointPredictive(curves)

    cdfs = []
    for k in ks:
        conditional = joint.Conditional(1, 0, k)
        conditional.label = 'k=%d' % k
        cdf = conditional.MakeCdf()
        cdfs.append(cdf)
        # report the credible interval as a side effect
        print('90%% credible interval for %d' % k, end=' ')
        print(cdf.CredibleInterval(90))

    return cdfs
def MakeJointPredictive(curves):
    """Build the joint distribution of (k, num_new).

    curves: list of (k, num_new) curves

    Returns: joint Pmf of (k, num_new) pairs
    """
    joint = thinkbayes2.Joint()

    # flatten all curves into one stream of (k, num_new) pairs
    pairs = (pair for curve in curves for pair in curve)
    for pair in pairs:
        joint.Incr(pair)

    joint.Normalize()
    return joint
def MakeFracCdfs(curves, ks):
    """Make Cdfs of the fraction of species seen after k reads.

    curves: list of (k, frac) curves
    ks: values of k to keep

    Returns: map from k to Cdf of fraction seen
    """
    # collect the fraction samples for each requested k
    samples = {}
    for curve in curves:
        for k, frac in curve:
            if k in ks:
                samples.setdefault(k, []).append(frac)

    return {k: thinkbayes2.Cdf(fracs) for k, fracs in samples.items()}
def SpeciesGenerator(names, num):
    """Generate a series of species names.

    Yields the given names first, then placeholder names of the form
    'unseen-<serial>' until num names have been produced (if there are
    more given names than num, all of them are still yielded).

    names: list of strings
    num: total number of species names to generate

    Returns: string iterator
    """
    count = 0
    for name in names:
        yield name
        count += 1

    for serial in range(count, num):
        yield 'unseen-%d' % serial
def ReadRarefactedData(filename='journal.pone.0047712.s001.csv',
                       clean_param=0):
    """Reads a data file and returns a map from code to Subject.

    Data from http://www.plosone.org/article/
    info%3Adoi%2F10.1371%2Fjournal.pone.0047712#s4

    filename: string filename to read
    clean_param: parameter passed to Clean

    Returns: map from code to Subject
    """
    fp = open(filename)
    reader = csv.reader(fp)
    #_ = reader.next()
    # skip the header row
    _ = next(reader)

    subject = Subject('')
    subject_map = {}

    # NOTE(review): rows appear to be grouped by subject code -- a change
    # of code starts a new Subject; confirm the file is sorted by code
    i = 0
    for t in reader:
        code = t[0]
        if code != subject.code:
            # start a new subject
            subject = Subject(code)
            subject_map[code] = subject

        # append a number to the species names so they're unique
        species = t[1]
        species = '%s-%d' % (species, i)
        i += 1

        count = int(t[2])
        subject.Add(species, count)

    # finalize every subject once all rows are read
    for code, subject in subject_map.items():
        subject.Done(clean_param=clean_param)

    return subject_map
def ReadCompleteDataset(filename='BBB_data_from_Rob.csv', clean_param=0):
    """Reads a data file and returns a map of Subjects plus a pooled one.

    Data from personal correspondence with <NAME>, received 2-7-13.
    Converted from xlsx to csv.

    filename: string filename to read
    clean_param: parameter passed to Clean

    Returns: (subject_map, uber_subject) where subject_map maps code to
    Subject and uber_subject pools the counts of all subjects
    """
    fp = open(filename)
    reader = csv.reader(fp)
    # the file has two header rows; the second holds the subject codes
    header = next(reader)
    header = next(reader)

    # first and last columns are OTU code and species names
    subject_codes = header[1:-1]
    subject_codes = ['B'+code for code in subject_codes]

    # create the subject map
    uber_subject = Subject('uber')
    subject_map = {}
    for code in subject_codes:
        subject_map[code] = Subject(code)

    # read lines
    i = 0
    for t in reader:
        otu_code = t[0]
        if otu_code == '':
            continue

        # pull out a species name and give it a number
        otu_names = t[-1]
        taxons = otu_names.split(';')
        species = taxons[-1]
        species = '%s-%d' % (species, i)
        i += 1

        # one count column per subject
        counts = [int(x) for x in t[1:-1]]

        # print otu_code, species
        for code, count in zip(subject_codes, counts):
            if count > 0:
                subject_map[code].Add(species, count)
                uber_subject.Add(species, count)

    uber_subject.Done(clean_param=clean_param)
    for code, subject in subject_map.items():
        subject.Done(clean_param=clean_param)

    return subject_map, uber_subject
def JoinSubjects():
    """Read both datasets and compute their inner join.

    For each subject in the rarefied dataset that also appears in the
    complete dataset, attach the totals from the complete version via
    Subject.Match (total_reads, total_species, prev_unseen).

    Returns: map from code to Subject (the rarefied subjects)
    """
    # read the rarefied dataset
    sampled_subjects = ReadRarefactedData()

    # read the complete dataset
    all_subjects, _ = ReadCompleteDataset()

    for code, subject in sampled_subjects.items():
        match = all_subjects.get(code)
        if match is not None:
            subject.Match(match)

    return sampled_subjects
def JitterCurve(curve, dx=0.2, dy=0.3):
    """Return a copy of curve with uniform random noise added.

    curve: list of (x, y) pairs
    dx, dy: amplitude of the noise in each dimension
    """
    jittered = []
    for x, y in curve:
        jittered.append((x + random.uniform(-dx, dx),
                         y + random.uniform(-dy, dy)))
    return jittered
def OffsetCurve(curve, i, n, dx=0.3, dy=0.3):
    """Shifts the pairs in a curve by a deterministic offset.

    Spreads n curves evenly across [-dx, dx] x [-dy, dy] so overlapping
    curves are distinguishable when plotted.

    curve: list of (x, y) pairs
    i: index of this curve (0..n-1)
    n: total number of curves
    dx, dy: amplitude of the offset in each dimension

    Returns: new list of shifted (x, y) pairs
    """
    if n > 1:
        xoff = -dx + 2 * dx * i / (n-1)
        yoff = -dy + 2 * dy * i / (n-1)
    else:
        # FIX: the original divided by (n-1), which raised
        # ZeroDivisionError for a single curve; one curve needs no offset
        xoff = yoff = 0.0

    return [(x + xoff, y + yoff) for x, y in curve]
def PlotCurves(curves, root='species-rare'):
    """Plot a set of rarefaction curves.

    curves: list of curves; each curve is a list of (x, y) pairs
    root: filename root for the saved figure
    """
    thinkplot.Clf()
    color = '#225EA8'

    n = len(curves)
    for i in range(n):
        # offset each curve slightly so they don't overlap exactly
        shifted = OffsetCurve(curves[i], i, n)
        xs, ys = zip(*shifted)
        thinkplot.Plot(xs, ys, color=color, alpha=0.3, linewidth=0.5)

    thinkplot.Save(root=root,
                   xlabel='# samples',
                   ylabel='# species',
                   formats=FORMATS,
                   legend=False)
def PlotConditionals(cdfs, root='species-cond'):
    """Plot cdfs of num_new conditioned on k.

    cdfs: list of Cdf
    root: string filename root
    """
    thinkplot.Clf()
    thinkplot.PrePlot(num=len(cdfs))

    thinkplot.Cdfs(cdfs)
    thinkplot.Save(root=root, xlabel='# new species',
                   ylabel='Prob', formats=FORMATS)
def PlotFracCdfs(cdfs, root='species-frac'):
    """Plot complementary CDFs of the fraction of species seen.

    cdfs: map from k to CDF of fraction of species seen after k samples
    root: filename root for the saved figure
    """
    thinkplot.Clf()
    color = '#225EA8'

    for k, cdf in cdfs.items():
        xs, ys = cdf.Render()
        # plot the complementary CDF: P(fraction seen > x)
        complement = [1 - y for y in ys]
        thinkplot.Plot(xs, complement, color=color, linewidth=1)

        # label each curve with its k near the x=0.9 crossing
        x = 0.9
        y = 1 - cdf.Prob(x)
        pyplot.text(x, y, str(k), fontsize=9, color=color,
                    horizontalalignment='center',
                    verticalalignment='center',
                    bbox=dict(facecolor='white', edgecolor='none'))

    thinkplot.Save(root=root,
                   xlabel='Fraction of species seen',
                   ylabel='Probability',
                   formats=FORMATS,
                   legend=False)
class Species(thinkbayes2.Suite):
    """Represents hypotheses about the number of species."""

    def __init__(self, ns, conc=1, iters=1000):
        # one Dirichlet hypothesis per candidate number of species
        hypos = [thinkbayes2.Dirichlet(n, conc) for n in ns]
        thinkbayes2.Suite.__init__(self, hypos)
        self.iters = iters

    def Update(self, data):
        """Updates the suite based on the data.

        data: list of observed frequencies
        """
        # the parent class updates the top level (the distribution of n,
        # via Likelihood); then each Dirichlet hypothesis updates its own
        # prevalence distribution
        thinkbayes2.Suite.Update(self, data)
        for hypo in self.Values():
            hypo.Update(data)

    def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under this hypothesis.

        hypo: Dirichlet object
        data: list of observed frequencies
        """
        dirichlet = hypo

        # Monte Carlo estimate: add up sample likelihoods drawn from
        # the hypothetical Dirichlet distribution
        total = 0
        for _ in range(self.iters):
            total += dirichlet.Likelihood(data)

        # correct for the number of ways the observed species
        # might have been chosen from all species
        m = len(data)
        return total * thinkbayes2.BinomialCoef(dirichlet.n, m)

    def DistN(self):
        """Returns the distribution of n as a Pmf."""
        pmf = thinkbayes2.Pmf()
        for hypo, prob in self.Items():
            pmf.Set(hypo.n, prob)
        return pmf
class Species2(object):
    """Represents hypotheses about the number of species.

    Combines two layers of the hierarchy into one object.

    ns and probs represent the distribution of N;
    params represents the parameters of the Dirichlet distributions.
    """

    def __init__(self, ns, conc=1, iters=1000):
        """
        ns: sequence of hypothetical numbers of species, increasing
        conc: concentration parameter for the Dirichlet priors
        iters: number of Monte Carlo samples per likelihood estimate
        """
        self.ns = ns
        self.conc = conc
        # FIX: numpy.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float yields the same dtype (float64).
        self.probs = numpy.ones(len(ns), dtype=float)
        self.params = numpy.ones(self.ns[-1], dtype=float) * conc
        self.iters = iters
        self.num_reads = 0  # total reads observed so far
        self.m = 0          # number of species seen so far

    def Preload(self, data):
        """Change the initial parameters to fit the data better.

        Just an experiment.  Doesn't work.
        """
        m = len(data)
        singletons = data.count(1)
        num = m - singletons
        print(m, singletons, num)
        addend = numpy.ones(num, dtype=float) * 1
        print(len(addend))
        print(len(self.params[singletons:m]))
        self.params[singletons:m] += addend
        print('Preload', num)

    def Update(self, data):
        """Updates the distribution based on data.

        data: numpy array of counts
        """
        self.num_reads += sum(data)

        # Monte Carlo estimate of the likelihood of the data for each n
        like = numpy.zeros(len(self.ns), dtype=float)
        for _ in range(self.iters):
            like += self.SampleLikelihood(data)

        self.probs *= like
        self.probs /= self.probs.sum()

        self.m = len(data)
        #self.params[:self.m] += data * self.conc
        self.params[:self.m] += data

    def SampleLikelihood(self, data):
        """Computes the likelihood of the data for all values of n.

        Draws one sample from the distribution of prevalences.

        data: sequence of observed counts

        Returns: numpy array of likelihoods, one per value of n
        """
        gammas = numpy.random.gamma(self.params)

        m = len(data)
        row = gammas[:m]
        col = numpy.cumsum(gammas)

        log_likes = []
        for n in self.ns:
            # normalize the observed gammas by the total of the first n
            ps = row / col[n-1]
            terms = numpy.log(ps) * data
            log_likes.append(terms.sum())

        # before exponentiating, scale into a reasonable range
        log_likes = numpy.asarray(log_likes)
        log_likes -= numpy.max(log_likes)
        likes = numpy.exp(log_likes)

        # correct for the number of ways the observed species
        # might have been chosen from all n species
        coefs = [thinkbayes2.BinomialCoef(n, m) for n in self.ns]
        likes *= coefs

        return likes

    def DistN(self):
        """Computes the distribution of n.

        Returns: new Pmf object
        """
        return thinkbayes2.Pmf(dict(zip(self.ns, self.probs)))

    def RandomN(self):
        """Returns a random value of n drawn from the posterior."""
        return self.DistN().Random()

    def DistQ(self, iters=100):
        """Computes the distribution of q based on the distribution of n.

        iters: number of samples of n to draw

        Returns: Pmf of q (the prevalence of unseen species)
        """
        cdf_n = self.DistN().MakeCdf()
        sample_n = cdf_n.Sample(iters)

        pmf = thinkbayes2.Pmf()
        for n in sample_n:
            q = self.RandomQ(n)
            pmf.Incr(q)

        pmf.Normalize()
        return pmf

    def RandomQ(self, n):
        """Returns a random value of q.

        Based on n, self.num_reads and self.conc.

        n: number of species

        Returns: q, the simulated prevalence of unseen species
        """
        # generate random prevalences
        dirichlet = thinkbayes2.Dirichlet(n, conc=self.conc)
        prevalences = dirichlet.Random()

        # generate a simulated sample of the size we actually observed
        pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
        cdf = pmf.MakeCdf()
        sample = cdf.Sample(self.num_reads)
        seen = set(sample)

        # add up the prevalence of species that were not sampled
        q = 0
        for species, prev in enumerate(prevalences):
            if species not in seen:
                q += prev
        return q

    def MarginalBeta(self, n, index):
        """Computes the conditional distribution of the indicated species.

        n: conditional number of species
        index: which species

        Returns: Beta object representing a distribution of prevalence.
        """
        alpha0 = self.params[:n].sum()
        alpha = self.params[index]
        return thinkbayes2.Beta(alpha, alpha0-alpha)

    def DistOfPrevalence(self, index):
        """Computes the distribution of prevalence for the indicated species.

        index: which species

        Returns: (metapmf, mix) where metapmf is a MetaPmf and mix is a Pmf
        """
        metapmf = thinkbayes2.Pmf()

        for n, prob in zip(self.ns, self.probs):
            beta = self.MarginalBeta(n, index)
            pmf = beta.MakePmf()
            metapmf.Set(pmf, prob)

        mix = thinkbayes2.MakeMixture(metapmf)
        return metapmf, mix

    def SamplePosterior(self):
        """Draws random n and prevalences.

        Returns: (n, prevalences)
        """
        n = self.RandomN()
        prevalences = self.SamplePrevalences(n)
        return n, prevalences

    def SamplePrevalences(self, n):
        """Draws a sample of prevalences given n.

        n: the number of species assumed in the conditional

        Returns: numpy array of n prevalences
        """
        if n == 1:
            return [1.0]

        # pick a target prevalence of unseen species, bounded away
        # from zero to keep Unbias numerically stable
        q_desired = self.RandomQ(n)
        q_desired = max(q_desired, 1e-6)

        params = self.Unbias(n, self.m, q_desired)

        gammas = numpy.random.gamma(params)
        gammas /= gammas.sum()
        return gammas

    def Unbias(self, n, m, q_desired):
        """Adjusts the parameters to achieve desired prev_unseen (q).

        n: number of species
        m: seen species
        q_desired: prevalence of unseen species

        Returns: numpy array of n adjusted parameters
        """
        params = self.params[:n].copy()

        if n == m:
            return params

        x = sum(params[:m])
        y = sum(params[m:])
        a = x + y

        # scale seen params by f and unseen params by g so the expected
        # unseen prevalence equals q_desired while the total stays a
        g = q_desired * a / y
        f = (a - g * y) / x
        params[:m] *= f
        params[m:] *= g

        return params
class Species3(Species2):
    """Represents hypotheses about the number of species.

    Like Species2, but SampleLikelihood is vectorized over n.
    """

    def Update(self, data):
        """Updates the suite based on the data.

        data: list of observations
        """
        # sample the likelihoods and add them up
        # FIX: numpy.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float yields the same dtype (float64).
        like = numpy.zeros(len(self.ns), dtype=float)
        for _ in range(self.iters):
            like += self.SampleLikelihood(data)

        self.probs *= like
        self.probs /= self.probs.sum()

        m = len(data)
        self.params[:m] += data

    def SampleLikelihood(self, data):
        """Computes the likelihood of the data under all hypotheses.

        data: list of observations

        Returns: numpy array of likelihoods, one per value of n
        """
        # get a random sample
        gammas = numpy.random.gamma(self.params)

        # row is just the first m elements of gammas
        m = len(data)
        row = gammas[:m]

        # col is the cumulative sum of gammas, one entry per value of n
        col = numpy.cumsum(gammas)[self.ns[0]-1:]

        # each row of the array is a set of ps, normalized
        # for each hypothetical value of n
        array = row / col[:, numpy.newaxis]

        # computing the multinomial PDF under a log transform
        # take the log of the ps and multiply by the data
        terms = numpy.log(array) * data

        # add up the rows
        log_likes = terms.sum(axis=1)

        # before exponentiating, scale into a reasonable range
        log_likes -= numpy.max(log_likes)
        likes = numpy.exp(log_likes)

        # correct for the number of ways we could see m species
        # out of a possible n
        coefs = [thinkbayes2.BinomialCoef(n, m) for n in self.ns]
        likes *= coefs

        return likes
class Species4(Species):
    """Represents hypotheses about the number of species."""

    def Update(self, data):
        """Update the suite one species at a time.

        data: list of observed frequencies
        """
        m = len(data)
        for i in range(m):
            # build a count vector containing only the i-th species
            one = numpy.zeros(i+1)
            one[i] = data[i]
            # delegate to the parent class for the actual update
            Species.Update(self, one)

    def Likelihood(self, data, hypo):
        """Compute the likelihood of the data under one hypothesis.

        Note: this only works correctly if we update one species at a time.

        hypo: Dirichlet object
        data: list of observed frequencies
        """
        dirichlet = hypo

        # Monte Carlo estimate: sum of sample likelihoods
        total = 0
        for _ in range(self.iters):
            total += dirichlet.Likelihood(data)

        # correct for the number of unseen species the new one
        # could have been
        m = len(data)
        num_unseen = dirichlet.n - m + 1
        return total * num_unseen
class Species5(Species2):
"""Represents hypotheses about the number of species.
Combines two laters of the hierarchy into one object.
ns and probs represent the distribution of N
params represents the parameters of the Dirichlet distributions
"""
def Update(self, data):
    """Update the suite, one species at a time.

    data: list of observed frequencies in increasing order
    """
    for i, count in enumerate(data):
        # species are numbered from 1
        self.UpdateOne(i+1, count)
        self.params[i] += count
def UpdateOne(self, i, count):
    """Updates the suite based on one observation.

    Evaluates the likelihood for all values of n.

    i: which species was observed (1..n)
    count: how many were observed
    """
    # how many species have we seen so far
    self.m = i

    # how many reads have we seen
    self.num_reads += count

    if self.iters == 0:
        return

    # sample the likelihoods and add them up
    # FIX: numpy.float was deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin float yields the same dtype (float64).
    likes = numpy.zeros(len(self.ns), dtype=float)
    for _ in range(self.iters):
        likes += self.SampleLikelihood(i, count)

    # correct for the number of unseen species the new one
    # could have been
    unseen_species = [n-i+1 for n in self.ns]
    likes *= unseen_species

    # multiply the priors by the likelihoods and renormalize
    self.probs *= likes
    self.probs /= self.probs.sum()
def SampleLikelihood(self, i, count):
"""Computes the likelihood of the data under all hypotheses.
i: which species was observed
count: how many were observed
"""
# get a random sample of p
gammas = numpy.random.gamma(self.params)
# sums is the cumulative sum of p, for each value of n
sums = numpy.cumsum(gammas)[self.ns[0]-1:]
# get p for the mth species, for each value of n
ps = gammas[i-1] / sums
log_likes = numpy.log(ps) * count
# before exponentiating, scale into a reasonable range
log_likes -= numpy.max(log_likes)
likes = | numpy.exp(log_likes) | numpy.exp |
"""
Copyright (c) 2014, Samsung Electronics Co.,Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Samsung Electronics Co.,Ltd..
"""
"""
cuda4py - CUDA cffi bindings and helper classes.
URL: https://github.com/ajkxyz/cuda4py
Original author: <NAME> <<EMAIL>>
"""
"""
Tests some of the api in cuda4py.blas._cublas module.
"""
import cuda4py as cu
import cuda4py.blas as blas
import gc
import logging
import numpy
import os
import unittest
class Test(unittest.TestCase):
def setUp(self):
    """Create a CUDA context and a CUBLAS handle before each test."""
    logging.basicConfig(level=logging.DEBUG)
    # remember the caller's CUDA_DEVICE so tearDown can restore it;
    # default to device 0 when the variable is unset
    self.old_env = os.environ.get("CUDA_DEVICE")
    if self.old_env is None:
        os.environ["CUDA_DEVICE"] = "0"
    self.ctx = cu.Devices().create_some_context()
    self.blas = blas.CUBLAS(self.ctx)
    # directory of this test module (fall back to cwd when empty)
    self.path = os.path.dirname(__file__)
    if not len(self.path):
        self.path = "."
def tearDown(self):
    """Restore the environment and release CUDA resources."""
    # undo whatever setUp did to CUDA_DEVICE
    if self.old_env is None:
        os.environ.pop("CUDA_DEVICE")
    else:
        os.environ["CUDA_DEVICE"] = self.old_env
    del self.old_env

    # drop the handle before the context, then force collection
    del self.blas
    del self.ctx
    gc.collect()
def test_constants(self):
    """Check that the cublas constants match the C API values."""
    expected = (
        (blas.CUBLAS_OP_N, 0),
        (blas.CUBLAS_OP_T, 1),
        (blas.CUBLAS_OP_C, 2),
        (blas.CUBLAS_DATA_FLOAT, 0),
        (blas.CUBLAS_DATA_DOUBLE, 1),
        (blas.CUBLAS_DATA_HALF, 2),
        (blas.CUBLAS_DATA_INT8, 3),
        (blas.CUBLAS_POINTER_MODE_HOST, 0),
        (blas.CUBLAS_POINTER_MODE_DEVICE, 1),
        (blas.CUBLAS_STATUS_SUCCESS, 0),
        (blas.CUBLAS_STATUS_NOT_INITIALIZED, 1),
        (blas.CUBLAS_STATUS_ALLOC_FAILED, 3),
        (blas.CUBLAS_STATUS_INVALID_VALUE, 7),
        (blas.CUBLAS_STATUS_ARCH_MISMATCH, 8),
        (blas.CUBLAS_STATUS_MAPPING_ERROR, 11),
        (blas.CUBLAS_STATUS_EXECUTION_FAILED, 13),
        (blas.CUBLAS_STATUS_INTERNAL_ERROR, 14),
        (blas.CUBLAS_STATUS_NOT_SUPPORTED, 15),
        (blas.CUBLAS_STATUS_LICENSE_ERROR, 16),
    )
    for value, want in expected:
        self.assertEqual(value, want)
def test_errors(self):
    """The error table should carry a ' | '-separated description."""
    message = cu.CU.ERRORS[blas.CUBLAS_STATUS_NOT_INITIALIZED]
    self.assertGreater(message.find(" | "), 0)
def _test_gemm(self, gemm, dtype):
    """Run the gemm test under both pointer modes."""
    modes = (blas.CUBLAS_POINTER_MODE_HOST,
             blas.CUBLAS_POINTER_MODE_DEVICE)
    for mode in modes:
        self._test_gemm_with_mode(gemm, dtype, mode)
def _test_gemm_with_mode(self, gemm, dtype, mode):
self.blas.set_pointer_mode(mode)
a = | numpy.zeros([127, 353], dtype=dtype) | numpy.zeros |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 10:09:21 2019
@author: nmei
"""
from autoreject import (AutoReject,get_rejection_threshold)
import mne
from glob import glob
import re
import os
import numpy as np
import pandas as pd
import pickle
#import faster # https://gist.github.com/wmvanvliet/d883c3fe1402c7ced6fc
from sklearn.metrics import roc_auc_score,roc_curve
from sklearn.metrics import (
classification_report,
matthews_corrcoef,
confusion_matrix,
f1_score,
log_loss,
r2_score
)
from sklearn.preprocessing import (MinMaxScaler,
OneHotEncoder,
FunctionTransformer,
StandardScaler)
from sklearn.pipeline import make_pipeline
from sklearn.ensemble.forest import _generate_unsampled_indices
from sklearn.utils import shuffle
from sklearn.svm import SVC,LinearSVC
from sklearn.calibration import CalibratedClassifierCV
from sklearn.decomposition import PCA
from sklearn.dummy import DummyClassifier
from sklearn.feature_selection import (SelectFromModel,
SelectPercentile,
VarianceThreshold,
mutual_info_classif,
f_classif,
chi2,
f_regression,
GenericUnivariateSelect)
from sklearn.model_selection import (StratifiedShuffleSplit,
cross_val_score)
from sklearn.ensemble import RandomForestClassifier,BaggingClassifier,VotingClassifier
from sklearn.neural_network import MLPClassifier
from xgboost import XGBClassifier
from itertools import product,combinations
from sklearn.base import clone
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from collections import OrderedDict
from scipy import stats
from collections import Counter
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import pyplot as plt
from matplotlib.pyplot import cm
from nilearn.plotting.img_plotting import (_load_anat,
_utils,
_plot_img_with_bg,
_get_colorbar_and_data_ranges,
_safe_get_data)
import matplotlib.patches as patches
try:
#from mvpa2.datasets.base import Dataset
from mvpa2.mappers.fx import mean_group_sample
#from mvpa2.measures import rsa
#from mvpa2.measures.searchlight import sphere_searchlight
#from mvpa2.base.learner import ChainLearner
#from mvpa2.mappers.shape import TransposeMapper
#from mvpa2.generators.partition import NFoldPartitioner
except:
pass#print('pymvpa is not installed')
try:
# from tqdm import tqdm_notebook as tqdm
from tqdm.auto import tqdm
except:
print('why is tqdm not installed?')
def preprocessing_conscious(raw,
                            events,
                            session,
                            tmin = -0,
                            tmax = 1,
                            notch_filter = 50,
                            event_id = {'living':1,'nonliving':2},
                            baseline = (None,None),
                            perform_ICA = False,
                            lowpass = None,
                            interpolate_bad_channels = True,):
    """
    Preprocess continuous EEG for the "conscious" trials.

    Steps: average re-reference, notch (and optional lowpass) filtering,
    epoching, optional bad-channel interpolation, and optional ICA-based
    artifact removal.

    raw: mne.io.Raw, continuous EEG data
    events: numpy event array as used by mne.Epochs
    session: session identifier (unused in this function)
    tmin, tmax: epoch window in seconds relative to each event
    notch_filter: base frequency (Hz) of the notch comb (harmonics up to 240 Hz)
    event_id: dict mapping condition name -> event code
        NOTE(review): mutable default, shared across calls -- safe only
        because it is never mutated here
    baseline: interval used for baseline correction of the epochs
    perform_ICA: if True, run ICA artifact detection/removal
    lowpass: optional lowpass cutoff in Hz (None = no lowpass)
    interpolate_bad_channels: if True, interpolate channels flagged by
        faster_bad_channels

    Returns: mne.Epochs containing only EEG channels
    """
    """
    0. re-reference - explicitly
    """
    raw_ref ,_ = mne.set_eeg_reference(raw,
                                       ref_channels = 'average',
                                       projection = True,)
    raw_ref.apply_proj() # it might tell you it already has been re-referenced, but do it anyway

    # everytime before filtering, explicitly pick the type of channels you want
    # to perform the filters
    picks = mne.pick_types(raw_ref.info,
                           meg = False, # No MEG
                           eeg = True,  # YES EEG
                           eog = perform_ICA, # depends on ICA
                           )
    # regardless the bandpass filtering later, we should always filter
    # for wire artifacts and their oscillations
    raw_ref.notch_filter(np.arange(notch_filter,241,notch_filter),
                         picks = picks)
    if lowpass is not None:
        raw_ref.filter(None,lowpass,)

    epochs = mne.Epochs(raw_ref,
                        events,    # numpy array
                        event_id,  # dictionary
                        tmin = tmin,
                        tmax = tmax,
                        baseline = baseline, # range of time for computing the mean references for each channel and subtract these values from all the time points per channel
                        picks = picks,
                        detrend = 1, # detrend
                        preload = True # must be true if we want to do further processing
                        )
    """
    1. if necessary, perform ICA
    """
    if perform_ICA:
        picks = mne.pick_types(epochs.info,
                               eeg = True, # YES EEG
                               eog = False # NO EOG
                               )
        if interpolate_bad_channels:
            # FASTER criterion flags bad channels, which are then
            # interpolated from their neighbors
            interpolation_list = faster_bad_channels(epochs,picks=picks)
            for ch_name in interpolation_list:
                epochs.info['bads'].append(ch_name)
            epochs = epochs.interpolate_bads()
        # ar = AutoReject(
        #                 picks = picks,
        #                 random_state = 12345,
        #                 )
        # ar.fit(epochs)
        # _,reject_log = ar.transform(epochs,return_log=True)
        # calculate the noise covariance of the epochs
        noise_cov = mne.compute_covariance(epochs,#[~reject_log.bad_epochs],
                                           tmin = baseline[0],
                                           tmax = baseline[1],
                                           method = 'empirical',
                                           rank = None,)
        # define an ica function
        ica = mne.preprocessing.ICA(n_components = .99,
                                    n_pca_components = .99,
                                    max_pca_components = None,
                                    method = 'infomax',
                                    max_iter = int(3e3),
                                    noise_cov = noise_cov,
                                    random_state = 12345,)
        picks = mne.pick_types(epochs.info,
                               eeg = True, # YES EEG
                               eog = False # NO EOG
                               )
        ica.fit(epochs,#[~reject_log.bad_epochs],
                picks = picks,
                start = tmin,
                stop = tmax,
                decim = 3,
                tstep = 1. # Length of data chunks for artifact rejection in seconds. It only applies if inst is of type Raw.
                )
        # search for artificial ICAs automatically
        # most of these hyperparameters were used in a unrelated published study
        ica.detect_artifacts(epochs,#[~reject_log.bad_epochs],
                             eog_ch = ['FT9','FT10','TP9','TP10'],
                             eog_criterion = 0.4, # arbitary choice
                             skew_criterion = 1, # arbitary choice
                             kurt_criterion = 1, # arbitary choice
                             var_criterion = 1, # arbitary choice
                             )
        picks = mne.pick_types(epochs.info,
                               eeg = True, # YES EEG
                               eog = False # NO EOG
                               )
        epochs_ica = ica.apply(epochs,#,[~reject_log.bad_epochs],
                               exclude = ica.exclude,
                               )
        epochs = epochs_ica.copy()
    else:
        picks = mne.pick_types(epochs.info,
                               eeg = True, # YES EEG
                               eog = False # NO EOG
                               )
        if interpolate_bad_channels:
            # same FASTER-based interpolation as in the ICA branch
            interpolation_list = faster_bad_channels(epochs,picks=picks)
            for ch_name in interpolation_list:
                epochs.info['bads'].append(ch_name)
            epochs = epochs.interpolate_bads()
    # pick the EEG channels for later use
    clean_epochs = epochs.pick_types(eeg = True, eog = False)

    return clean_epochs
def preprocessing_unconscious(raw,
                              events,
                              session,
                              tmin = -0,
                              tmax = 1,
                              notch_filter = 50,
                              event_id = {'living':1,'nonliving':2},
                              baseline = (None,None),
                              perform_ICA = False,
                              eog_chs = [],
                              ecg_chs = [],):
    """
    Preprocess continuous MEG for the "unconscious" trials.

    Steps: notch filtering of the MEG channels, 1-12 Hz bandpass of the
    EOG/ECG channels, epoching, and optional ICA artifact removal that
    uses the EOG/ECG channels as references.

    raw: mne.io.Raw, continuous MEG data
    events: numpy event array as used by mne.Epochs
    session: session identifier (unused in this function)
    tmin, tmax: epoch window in seconds relative to each event
    notch_filter: base notch frequency in Hz, or a list of base
        frequencies (harmonics up to 300 Hz are filtered for each)
    event_id: dict mapping condition name -> event code
        NOTE(review): mutable default, shared across calls -- safe only
        because it is never mutated here
    baseline: interval used for baseline correction of the epochs
    perform_ICA: if True, run ICA artifact detection/removal
    eog_chs: names of EOG channels used by ICA artifact detection
    ecg_chs: names of ECG channels; only ecg_chs[0] is used, so it must
        be non-empty when perform_ICA is True
    Returns: mne.Epochs (MEG plus EEG channels, EOG dropped)
    """
    # everytime before filtering, explicitly pick the type of channels you want
    # to perform the filters
    picks = mne.pick_types(raw.info,
                           meg = True,  # YES MEG (original comment said "No MEG" but the flag is True)
                           eeg = False, # NO EEG
                           eog = True,  # YES EOG
                           ecg = True,  # YES ECG
                           )
    # regardless the bandpass filtering later, we should always filter
    # for wire artifacts and their oscillations
    if type(notch_filter) is list:
        for item in notch_filter:
            raw.notch_filter(np.arange(item,301,item),
                             picks = picks)
    else:
        raw.notch_filter(np.arange(notch_filter,301,notch_filter),
                         picks = picks)

    # filter EOG and ECG channels
    picks = mne.pick_types(raw.info,
                           meg = False,
                           eeg = False,
                           eog = True,
                           ecg = True,)
    raw.filter(1,12,picks = picks,)

    # epoch the data
    picks = mne.pick_types(raw.info,
                           meg = True,
                           eog = True,
                           ecg = True,
                           )
    epochs = mne.Epochs(raw,
                        events,    # numpy array
                        event_id,  # dictionary
                        tmin = tmin,
                        tmax = tmax,
                        baseline = baseline, # range of time for computing the mean references for each channel and subtract these values from all the time points per channel
                        picks = picks,
                        detrend = 1, # detrend
                        preload = True # must be true if we want to do further processing
                        )
    """
    1. if necessary, perform ICA
    """
    if perform_ICA:
        picks = mne.pick_types(epochs.info,
                               meg = True,  # YES MEG
                               eeg = False, # NO EEG
                               eog = False, # NO EOG
                               ecg = False, # NO ECG
                               )
        # ar = AutoReject(
        #                 picks = picks,
        #                 random_state = 12345,
        #                 )
        # ar.fit(epochs)
        # _,reject_log = ar.transform(epochs,return_log=True)
        # calculate the noise covariance of the epochs (pre-stimulus window)
        noise_cov = mne.compute_covariance(epochs,#[~reject_log.bad_epochs],
                                           tmin = tmin,
                                           tmax = 0,
                                           method = 'empirical',
                                           rank = None,)
        # define an ica function
        ica = mne.preprocessing.ICA(n_components = .99,
                                    n_pca_components = .99,
                                    max_pca_components = None,
                                    method = 'extended-infomax',
                                    max_iter = int(3e3),
                                    noise_cov = noise_cov,
                                    random_state = 12345,)
        # NOTE(review): this pick selects EEG channels inside an MEG
        # pipeline, which looks inconsistent with the MEG picks above --
        # confirm this is intentional
        picks = mne.pick_types(epochs.info,
                               eeg = True, # YES EEG
                               eog = False # NO EOG
                               )
        ica.fit(epochs,#[~reject_log.bad_epochs],
                picks = picks,
                start = tmin,
                stop = tmax,
                decim = 3,
                tstep = 1. # Length of data chunks for artifact rejection in seconds. It only applies if inst is of type Raw.
                )
        # search for artificial ICAs automatically
        # most of these hyperparameters were used in a unrelated published study
        ica.detect_artifacts(epochs,#[~reject_log.bad_epochs],
                             eog_ch = eog_chs,
                             ecg_ch = ecg_chs[0],
                             eog_criterion = 0.4, # arbitary choice
                             ecg_criterion = 0.1, # arbitary choice
                             skew_criterion = 1, # arbitary choice
                             kurt_criterion = 1, # arbitary choice
                             var_criterion = 1, # arbitary choice
                             )
        epochs_ica = ica.apply(epochs,#,[~reject_log.bad_epochs],
                               exclude = ica.exclude,
                               )
        epochs = epochs_ica.copy()
    # pick the EEG channels for later use
    clean_epochs = epochs.pick_types(meg = True, eeg = True, eog = False)

    return clean_epochs
def _preprocessing_conscious(
        raw,events,session,
        n_interpolates = np.arange(1,32,4),
        consensus_pers = np.linspace(0,1.0,11),
        event_id = {'living':1,'nonliving':2},
        tmin = -0.15,
        tmax = 0.15 * 6,
        high_pass = 0.001,
        low_pass = 30,
        notch_filter = 50,
        fix = False,
        ICA = False,
        logging = None,
        filtering = False,):
    """
    Preprocessing pipeline for conscious trials
    Inputs
    -------------------
    raw: MNE Raw object, contineous EEG raw data
    events: Numpy array with 3 columns, where the first column indicates time and the last column indicates event code
    session: session identifier (kept for API symmetry; not referenced in the body)
    n_interpolates: list of values 1 <= N <= max number of channels
        NOTE(review): currently unused — the AutoReject calls below run with defaults
    consensus_pers: ?? autoreject hyperparameter search grid
        NOTE(review): currently unused — the AutoReject calls below run with defaults
    event_id: MNE argument, to control for epochs
    tmin: first time stamp of the epoch
    tmax: last time stamp of the epoch
    high_pass: low cutoff of the bandpass filter
    low_pass: high cutoff of the bandpass filter
        NOTE(review): only referenced by the commented-out low-pass calls below
    notch_filter: frequency of the notch filter, 60 in US and 50 in Europe
    fix : when "True", apply autoReject algorithm to remove artifacts that was not identifed in the ICA procedure
    ICA : when "True", apply ICA artifact correction in ICA space
    logging: when not "None", output some log files for us to track the process
    filtering: when "True", apply the FIR high-pass filter before epoching
    Output
    -------------------
    Epochs: MNE Epochs object, segmented and cleaned EEG data (n_trials x n_channels x n_times)
    """
    """
    0. re-reference - explicitly
    """
    raw_ref ,_ = mne.set_eeg_reference(raw,
                                       ref_channels = 'average',
                                       projection = True,)
    raw_ref.apply_proj() # it might tell you it already has been re-referenced, but do it anyway
    """
    1. highpass filter
        by a 4th order zero-phase Butterworth filter
    """
    # everytime before filtering, explicitly pick the type of channels you want
    # to perform the filters
    picks = mne.pick_types(raw_ref.info,
                           meg = False, # No MEG
                           eeg = True, # YES EEG
                           eog = True, # YES EOG
                           )
    # regardless the bandpass filtering later, we should always filter
    # for wire artifacts and their oscillations
    # (notch at the base frequency and its harmonics up to 240 Hz)
    raw_ref.notch_filter(np.arange(notch_filter,241,notch_filter),
                         picks = picks)
    # high pass filtering
    picks = mne.pick_types(raw_ref.info,
                           meg = False, # No MEG
                           eeg = True, # YES EEG
                           eog = False, # No EOG
                           )
    if filtering:
        raw_ref.filter(high_pass,
                       None,
                       picks = picks,
                       filter_length = 'auto', # the filter length is chosen based on the size of the transition regions (6.6 times the reciprocal of the shortest transition band for fir_window=’hamming’ and fir_design=”firwin2”, and half that for “firwin”)
                       l_trans_bandwidth= high_pass,
                       method = 'fir', # overlap-add FIR filtering
                       phase = 'zero', # the delay of this filter is compensated for
                       fir_window = 'hamming', # The window to use in FIR design
                       fir_design = 'firwin2', # a time-domain design technique that generally gives improved attenuation using fewer samples than “firwin2”
                       )
    """
    2. epoch the data
    """
    picks = mne.pick_types(raw_ref.info,
                           eeg = True, # YES EEG
                           eog = True, # YES EOG
                           )
    epochs = mne.Epochs(raw_ref,
                        events, # numpy array
                        event_id, # dictionary
                        tmin = tmin,
                        tmax = tmax,
                        baseline = (tmin,- (1 / 60 * 20)), # range of time for computing the mean references for each channel and subtract these values from all the time points per channel
                        picks = picks,
                        detrend = 1, # linear detrend
                        preload = True # must be true if we want to do further processing
                        )
    """
    4. ica on epoch data
    """
    if ICA:
        """
        3. apply autoreject
        """
        picks = mne.pick_types(epochs.info,
                               eeg = True, # YES EEG
                               eog = False # NO EOG
                               )
        ar = AutoReject(
                        # n_interpolate = n_interpolates,
                        # consensus = consensus_pers,
                        # thresh_method = 'bayesian_optimization',
                        picks = picks,
                        random_state = 12345,
                        # n_jobs = 1,
                        # verbose = 'progressbar',
                        )
        ar.fit(epochs)
        # flag bad epochs; they are excluded from covariance / ICA fitting below
        _,reject_log = ar.transform(epochs,return_log=True)
        if logging is not None:
            fig = plot_EEG_autoreject_log(ar)
            fig.savefig(logging,bbox_inches = 'tight')
            for key in epochs.event_id.keys():
                evoked = epochs[key].average()
                fig_ = evoked.plot_joint(title = key)
                fig_.savefig(logging.replace('.png',f'_{key}_pre.png'),
                             bbox_inches = 'tight')
            plt.close('all')
        # calculate the noise covariance of the epochs
        noise_cov = mne.compute_covariance(epochs[~reject_log.bad_epochs],
                                           tmin = tmin,
                                           tmax = tmax,
                                           method = 'empirical',
                                           rank = None,)
        # define an ica function
        ica = mne.preprocessing.ICA(n_components = .99,
                                    n_pca_components = .99,
                                    max_pca_components = None,
                                    method = 'extended-infomax',
                                    max_iter = int(3e3),
                                    noise_cov = noise_cov,
                                    random_state = 12345,)
        # # search for a global rejection threshold globally
        # reject = get_rejection_threshold(epochs[~reject_log.bad_epochs],
        #                                  decim = 1,
        #                                  random_state = 12345)
        picks = mne.pick_types(epochs.info,
                               eeg = True, # YES EEG
                               eog = False # NO EOG
                               )
        ica.fit(epochs[~reject_log.bad_epochs],
                picks = picks,
                start = tmin,
                stop = tmax,
                # reject = reject, # if some data in a window has values that exceed the rejection threshold, this window will be ignored when computing the ICA
                decim = 3,
                tstep = 1. # Length of data chunks for artifact rejection in seconds. It only applies if inst is of type Raw.
                )
        # search for artificial ICAs automatically
        # most of these hyperparameters were used in a unrelated published study
        ica.detect_artifacts(epochs[~reject_log.bad_epochs],
                             eog_ch = ['FT9','FT10','TP9','TP10'],
                             eog_criterion = 0.4, # arbitary choice
                             skew_criterion = 2, # arbitary choice
                             kurt_criterion = 2, # arbitary choice
                             var_criterion = 2, # arbitary choice
                             )
        # # explicitly search for eog ICAs
        # eog_idx,scores = ica.find_bads_eog(raw_ref,
        #                                    start = tmin,
        #                                    stop = tmax,
        #                                    l_freq = 2,
        #                                    h_freq = 10,
        #                                    )
        # ica.exclude += eog_idx
        picks = mne.pick_types(epochs.info,
                               eeg = True, # YES EEG
                               eog = False # NO EOG
                               )
        # note: ICA is applied to ALL epochs, not only the AutoReject-clean ones
        epochs_ica = ica.apply(epochs,#,[~reject_log.bad_epochs],
                               exclude = ica.exclude,
                               )
    else:
        picks = mne.pick_types(epochs.info,
                               eeg = True,
                               eog = False,)
        # epochs.filter(None,
        #               low_pass,
        #               picks = picks,
        #               filter_length = 'auto', # the filter length is chosen based on the size of the transition regions (6.6 times the reciprocal of the shortest transition band for fir_window=’hamming’ and fir_design=”firwin2”, and half that for “firwin”)
        #               method = 'fir', # overlap-add FIR filtering
        #               phase = 'zero', # the delay of this filter is compensated for
        #               fir_window = 'hamming', # The window to use in FIR design
        #               fir_design = 'firwin2', # a time-domain design technique that generally gives improved attenuation using fewer samples than “firwin2”
        #               )
        if logging is not None:
            for key in epochs.event_id.keys():
                evoked = epochs[key].average()
                fig_ = evoked.plot_joint(title = key)
                fig_.savefig(logging.replace('.png',f'_{key}_post.png'),
                             bbox_inches = 'tight')
                plt.close('all')
        # no ICA requested: return the (possibly unfixed) epochs immediately
        return epochs
    # from here on we are always in the ICA branch (the else branch returned above)
    if fix:
        """
        """
        # second AutoReject pass to remove artifacts the ICA did not capture
        ar = AutoReject(
                        # n_interpolate = n_interpolates,
                        # consensus = consensus_pers,
                        # thresh_method = 'bayesian_optimization',
                        picks = picks,
                        random_state = 12345,
                        # n_jobs = 1,
                        # verbose = 'progressbar',
                        )
        epochs_clean = ar.fit_transform(epochs_ica,
                                        )
        return epochs_clean.pick_types(eeg=True,eog=False)
    else:
        clean_epochs = epochs_ica.pick_types(eeg = True,
                                             eog = False)
        picks = mne.pick_types(clean_epochs.info,
                               eeg = True,
                               eog = False,)
        # clean_epochs.filter(None,
        #                     low_pass,
        #                     picks = picks,
        #                     filter_length = 'auto', # the filter length is chosen based on the size of the transition regions (6.6 times the reciprocal of the shortest transition band for fir_window=’hamming’ and fir_design=”firwin2”, and half that for “firwin”)
        #                     method = 'fir', # overlap-add FIR filtering
        #                     phase = 'zero', # the delay of this filter is compensated for
        #                     fir_window = 'hamming', # The window to use in FIR design
        #                     fir_design = 'firwin2', # a time-domain design technique that generally gives improved attenuation using fewer samples than “firwin2”
        #                     )
        if logging is not None:
            for key in clean_epochs.event_id.keys():
                # NOTE(review): this plots the pre-ICA `epochs`, not `clean_epochs`
                # — confirm whether the post-ICA data was intended here
                evoked = epochs[key].average()
                fig_ = evoked.plot_joint(title = key)
                fig_.savefig(logging.replace('.png',f'_{key}_post.png'),
                             bbox_inches = 'tight')
                plt.close('all')
        return clean_epochs
def plot_temporal_decoding(times,
                           scores,
                           frames,
                           ii,
                           conscious_state,
                           plscores,
                           n_splits,
                           ylim = (0.2,0.8)):
    """
    Plot the fold-averaged temporal decoding curve with its standard error.

    times: 1D array of epoch time stamps
    scores: (n_splits, n_times) cross-validated decoding scores
    frames: per-condition probe-frame statistics; frames[ii] = (vis, mean, std)
    ii: index into `frames`, or None to skip the probe-offset band
    conscious_state: condition name shown in the title
    plscores: scores summarized (mean +/- std) in the title
    n_splits: number of cross-validation folds
    ylim: y-axis limits

    Returns (fig, ax).
    """
    avg_curve = scores.mean(0)
    sem_curve = scores.std(0) / np.sqrt(n_splits)
    fig,ax = plt.subplots(figsize = (16,8))
    # average curve over folds
    ax.plot(times,
            avg_curve,
            color = 'k',
            alpha = .9,
            label = f'Average across {n_splits} folds',
            )
    # +/- one standard-error band around the average
    ax.fill_between(times,
                    avg_curve + sem_curve,
                    avg_curve - sem_curve,
                    color = 'red',
                    alpha = 0.4,
                    label = 'Standard Error',)
    # reference lines: chance level and probe onset
    ax.axhline(0.5,
               linestyle = '--',
               color = 'k',
               alpha = 0.7,
               label = 'Chance level')
    ax.axvline(0,
               linestyle = '--',
               color = 'blue',
               alpha = 0.7,
               label = 'Probe onset',)
    if ii is not None:
        # shade the probe offset band (1/100 s per frame — per the original scaling)
        span_lo = frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100)
        span_hi = frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100)
        ax.axvspan(span_lo,
                   span_hi,
                   color = 'blue',
                   alpha = 0.3,
                   label = 'probe offset ave +/- std',)
    ax.set(xlim = (times.min(),
                   times.max()),
           ylim = ylim,
           title = f'Temporal decoding of {conscious_state} = {plscores.mean():.3f}+/-{plscores.std():.3f}',
           )
    ax.legend()
    return fig,ax
def plot_temporal_generalization(scores_gen_,
                                 times,
                                 ii,
                                 conscious_state,
                                 frames,
                                 vmin = 0.4,
                                 vmax = 0.6):
    """
    Plot the training-time x testing-time generalization matrix as a heatmap.

    scores_gen_: 2D (train x test) scores, or 3D (folds x train x test) —
        a 3D stack is averaged over the first axis
    times: 1D array of epoch time stamps
    ii: index into `frames`, or None to skip the probe-offset bands
    conscious_state: condition name shown in the title
    frames: per-condition probe-frame statistics; frames[ii] = (vis, mean, std)
    vmin, vmax: color limits

    Returns (fig, ax).
    """
    fig, ax = plt.subplots(figsize = (10,10))
    # collapse a fold-wise stack into a single matrix
    if len(scores_gen_.shape) > 2:
        scores_gen_ = scores_gen_.mean(0)
    heatmap = ax.imshow(
            scores_gen_,
            interpolation = 'hamming',
            origin = 'lower',
            cmap = 'RdBu_r',
            extent = times[[0, -1, 0, -1]],
            vmin = vmin,
            vmax = vmax,
            )
    ax.set_xlabel('Testing Time (s)')
    ax.set_ylabel('Training Time (s)')
    ax.set_title(f'Temporal generalization of {conscious_state}')
    # probe onset reference lines on both axes
    ax.axhline(0.,
               linestyle = '--',
               color = 'black',
               alpha = 0.7,
               label = 'Probe onset',)
    ax.axvline(0.,
               linestyle = '--',
               color = 'black',
               alpha = 0.7,
               )
    if ii is not None:
        # shade the probe offset band on both axes (1/100 s per frame)
        span_lo = frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100)
        span_hi = frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100)
        ax.axhspan(span_lo,
                   span_hi,
                   color = 'black',
                   alpha = 0.2,
                   label = 'probe offset ave +/- std',)
        ax.axvspan(span_lo,
                   span_hi,
                   color = 'black',
                   alpha = 0.2,
                   )
    plt.colorbar(heatmap, ax = ax)
    ax.legend()
    return fig,ax
def plot_t_stats(T_obs,
                 clusters,
                 cluster_p_values,
                 times,
                 ii,
                 conscious_state,
                 frames,):
    """
    Plot cluster-thresholded T statistics of a temporal-generalization test.

    T_obs: (n_times, n_times) observed T statistics
    clusters: list of boolean masks over T_obs, one per cluster
    cluster_p_values: corrected p value of each cluster
    times: 1D array of epoch time stamps
    ii: index into `frames`, or None to skip the probe-offset bands
    conscious_state: condition name shown in the title
    frames: per-condition probe-frame statistics; frames[ii] = (vis, mean, std)

    Returns (fig, ax).
    """
    # since the p values of each cluster is corrected for multiple comparison,
    # we could directly use 0.05 as the threshold to filter clusters
    # fix: np.zeros_like replaces the wasteful `0 * np.ones_like(T_obs)`
    T_obs_plot = np.zeros_like(T_obs)
    # choose a minimum cluster size from the largest cluster found
    # NOTE(review): the bucket boundaries are exclusive, so a maximum of
    # exactly 1000/500/100/10 falls through — confirm this is intended
    k = np.array([np.sum(c) for c in clusters])
    if np.max(k) > 1000:
        c_thresh = 1000
    elif 1000 > np.max(k) > 500:
        c_thresh = 500
    elif 500 > np.max(k) > 100:
        c_thresh = 100
    elif 100 > np.max(k) > 10:
        c_thresh = 10
    else:
        c_thresh = 0
    # keep only significant and sufficiently large clusters
    for c, p_val in zip(clusters, cluster_p_values):
        if (p_val <= 0.01) and (np.sum(c) >= c_thresh):
            T_obs_plot[c] = T_obs[c]
    # symmetric color range centered at zero
    vmax = np.max(np.abs(T_obs))
    vmin = -vmax
    plt.close('all')
    fig,ax = plt.subplots(figsize=(10,10))
    im = ax.imshow(T_obs_plot,
                   origin = 'lower',
                   cmap = plt.cm.RdBu_r,# to emphasize the clusters
                   extent = times[[0, -1, 0, -1]],
                   vmin = vmin,
                   vmax = vmax,
                   interpolation = 'lanczos',
                   )
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right",
                              size = "5%",
                              pad = 0.2)
    cb = plt.colorbar(im,
                      cax = cax,
                      ticks = np.linspace(vmin,vmax,3))
    cb.ax.set(title = 'T Statistics')
    # diagonal marks train time == test time
    ax.plot([times[0],times[-1]],[times[0],times[-1]],
            linestyle = '--',
            color = 'black',
            alpha = 0.7,
            )
    ax.axhline(0.,
               linestyle = '--',
               color = 'black',
               alpha = 0.7,
               label = 'Probe onset',)
    ax.axvline(0.,
               linestyle = '--',
               color = 'black',
               alpha = 0.7,
               )
    if ii is not None:
        # shade the probe offset band on both axes (1/100 s per frame)
        span_lo = frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100)
        span_hi = frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100)
        ax.axhspan(span_lo,
                   span_hi,
                   color = 'black',
                   alpha = 0.2,
                   label = 'probe offset ave +/- std',)
        ax.axvspan(span_lo,
                   span_hi,
                   color = 'black',
                   alpha = 0.2,
                   )
    ax.set(xlabel = 'Test time',
           ylabel = 'Train time',
           title = f'nonparametric t test of {conscious_state}')
    ax.legend()
    return fig,ax
def plot_p_values(times,
                  clusters,
                  cluster_p_values,
                  ii,
                  conscious_state,
                  frames):
    """
    Plot corrected cluster p values over the train-time x test-time plane.

    times: 1D array of epoch time stamps
    clusters: list of boolean masks, one per cluster
    cluster_p_values: corrected p value of each cluster
    ii: index into `frames`, or None to skip the probe-offset bands
    conscious_state: condition name shown in the title
    frames: per-condition probe-frame statistics; frames[ii] = (vis, mean, std)

    Returns (fig, ax).
    """
    width = len(times)
    # start from an all-ones (non-significant) map
    p_clust = np.ones((width, width))
    # minimum cluster size derived from the largest cluster
    k = np.array([np.sum(c) for c in clusters])
    if np.max(k) > 1000:
        c_thresh = 1000
    elif 1000 > np.max(k) > 500:
        c_thresh = 500
    elif 500 > np.max(k) > 100:
        c_thresh = 100
    elif 100 > np.max(k) > 10:
        c_thresh = 10
    else:
        c_thresh = 0
    for c, p_val in zip(clusters, cluster_p_values):
        if (np.sum(c) >= c_thresh):
            masked_p = p_val.copy()
            # anything above .05 is rendered as exactly 1 (non-significant)
            if masked_p > 0.05:
                masked_p = 1.
            p_clust[c] = masked_p
    # p values live on a fixed [0, 1] color scale
    vmax = 1.
    vmin = 0.
    plt.close('all')
    fig,ax = plt.subplots(figsize = (10,10))
    im = ax.imshow(p_clust,
                   origin = 'lower',
                   cmap = plt.cm.RdBu_r,# to emphasize the clusters
                   extent = times[[0, -1, 0, -1]],
                   vmin = vmin,
                   vmax = vmax,
                   interpolation = 'hanning',
                   )
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right",
                              size = "5%",
                              pad = 0.2)
    cb = plt.colorbar(im,
                      cax = cax,
                      ticks = [0,0.05,1])
    cb.ax.set(title = 'P values')
    # diagonal marks train time == test time
    ax.plot([times[0],times[-1]],[times[0],times[-1]],
            linestyle = '--',
            color = 'black',
            alpha = 0.7,
            )
    ax.axhline(0.,
               linestyle = '--',
               color = 'black',
               alpha = 0.7,
               label = 'Probe onset',)
    ax.axvline(0.,
               linestyle = '--',
               color = 'black',
               alpha = 0.7,
               )
    if ii is not None:
        # shade the probe offset band on both axes (1/100 s per frame)
        span_lo = frames[ii][1] * (1 / 100) - frames[ii][2] * (1 / 100)
        span_hi = frames[ii][1] * (1 / 100) + frames[ii][2] * (1 / 100)
        ax.axhspan(span_lo,
                   span_hi,
                   color = 'black',
                   alpha = 0.2,
                   label = 'probe offset ave +/- std',)
        ax.axvspan(span_lo,
                   span_hi,
                   color = 'black',
                   alpha = 0.2,
                   )
    ax.set(xlabel = 'Test time',
           ylabel = 'Train time',
           title = f'p value map of {conscious_state}')
    ax.legend()
    return fig,ax
def plot_EEG_autoreject_log(autoreject_object,):
    """
    Visualize the AutoReject cross-validation error grid and mark the
    selected (consensus, n_interpolate) pair with a red rectangle.

    autoreject_object: a fitted AutoReject instance (must expose `loss_`,
        `consensus` and `n_interpolate`)

    Returns the matplotlib figure.
    """
    ar = autoreject_object
    # losses are stored by channel type; average across channels
    mean_loss = ar.loss_['eeg'].mean(axis=-1)
    fig,ax = plt.subplots(figsize=(10,6))
    im = ax.matshow(mean_loss.T * 1e6, cmap=plt.get_cmap('viridis'))
    ax.set(xticks = range(len(ar.consensus)),
           xticklabels = ar.consensus.round(2),
           yticks = range(len(ar.n_interpolate)),
           yticklabels = ar.n_interpolate)
    # rectangle around the hyperparameters with the lowest CV error
    best_i, best_j = np.unravel_index(mean_loss.argmin(), mean_loss.shape)
    marker = patches.Rectangle((best_i - 0.5, best_j - 0.5), 1, 1, linewidth=2,
                               edgecolor='r', facecolor='none')
    ax.add_patch(marker)
    ax.xaxis.set_ticks_position('bottom')
    ax.set(xlabel = r'Consensus percentage $\kappa$',
           ylabel = r'Max sensors interpolated $\rho$',
           title = 'Mean cross validation error (x 1e6)')
    plt.colorbar(im)
    return fig
def str2int(x):
    """
    Extract the first run of digits from a string as a number.

    Despite the name, the extracted value is returned as ``float`` —
    callers (``.apply(str2int)`` on mixed columns) rely on this.
    Non-string inputs pass through unchanged.

    Parameters
    ----------
    x : str or any
        e.g. "12 frames" -> 12.0; 7 -> 7

    Returns
    -------
    float or the original value

    Raises
    ------
    IndexError
        If ``x`` is a string containing no digits.
    """
    # isinstance is the idiomatic type check (also accepts str subclasses)
    if isinstance(x, str):
        return float(re.findall(r'\d+', x)[0])
    return x
def simple_load(f,idx):
    """
    Load one behavioral csv file and tag every row with its run index.

    f: path (or buffer) of the csv file
    idx: run index written into a new 'run' column

    Returns the tagged DataFrame.
    """
    loaded = pd.read_csv(f)
    loaded['run'] = idx
    return loaded
def get_frames(directory,new = True,EEG = True):
    """
    Summarize the behavioral csv files of one subject.

    Prints (and accumulates into a string) per-visibility-rating trial counts,
    probe-frame statistics and response probabilities.

    Inputs
    -------------------
    directory: folder holding the behavioral csv files
    new: when True keep the raw probeFrames; when False shift them by
         -2/-1/+1/+2 according to the visibility rating (legacy behavior)
    EEG: when True glob '*trials.csv' directly inside `directory`,
         otherwise search one level of subfolders
    Output
    -------------------
    results: list of [visibility, mean probeFrames, std probeFrames]
    empty_temp: the accumulated text summary that was printed
    """
    if EEG:
        files = glob(os.path.join(directory,'*trials.csv'))
    # elif EEG == 'fMRI':
    #     files = glob(os.path.join(directory,'*trials.csv'))
    else:
        files = glob(os.path.join(directory,'*','*.csv'))
    empty_temp = ''
    # first pass: per-session trial counts per visibility rating
    for ii,f in enumerate(files):
        df = pd.read_csv(f).dropna()
        for vis,df_sub in df.groupby(['visible.keys_raw']):
            try:
                print(f'session {ii+1}, vis = {vis}, n_trials = {df_sub.shape[0]}')
                empty_temp += f'session {ii+1}, vis = {vis}, n_trials = {df_sub.shape[0]}'
                empty_temp += '\n'
            # NOTE(review): bare except — presumably a fallback for interpreters
            # without f-strings; confirm it is still needed
            except:
                print('session {}, vis = {}, n_trials = {}'.format(ii+1,
                      vis,df_sub.shape[0]))
    df = pd.concat([simple_load(f,ii).dropna() for ii,f in enumerate(files)])
    # coerce the string-coded columns to numbers; the except branch handles
    # files that use the alternative 'probe_Frames_raw' column name
    try:
        for col in ['probeFrames_raw',
                    'response.keys_raw',
                    'visible.keys_raw']:
            # print(df[col])
            df[col] = df[col].apply(str2int)
    except:
        for col in ['probe_Frames_raw',
                    'response.keys_raw',
                    'visible.keys_raw']:
            # print(df[col])
            df[col] = df[col].apply(str2int)
        df["probeFrames_raw"] = df["probe_Frames_raw"]
    # 999 is the invalid-trial sentinel
    df = df[df['probeFrames_raw'] != 999]
    df = df.sort_values(['run','order'])
    # second pass: frame medians and response probabilities per visibility
    for vis,df_sub in df.groupby(['visible.keys_raw']):
        df_press1 = df_sub[df_sub['response.keys_raw'] == 1]
        df_press2 = df_sub[df_sub['response.keys_raw'] == 2]
        prob1 = df_press1.shape[0] / df_sub.shape[0]
        prob2 = df_press2.shape[0] / df_sub.shape[0]
        try:
            print(f"\nvis = {vis},mean frames = {np.median(df_sub['probeFrames_raw']):.5f}")
            print(f"vis = {vis},prob(press 1) = {prob1:.4f}, p(press 2) = {prob2:.4f}")
            empty_temp += f"\nvis = {vis},mean frames = {np.median(df_sub['probeFrames_raw']):.5f}\n"
            empty_temp += f"vis = {vis},prob(press 1) = {prob1:.4f}, p(press 2) = {prob2:.4f}\n"
        except:
            print("\nvis = {},mean frames = {:.5f}".format(
                    vis,np.median(df_sub['probeFrames_raw'])))
            print(f"vis = {vis},prob(press 1) = {prob1:.4f}, p(press 2) = {prob2:.4f}")
    if new:
        # keep probeFrames unshifted regardless of visibility rating
        df = []
        for f in files:
            temp = pd.read_csv(f).dropna()
            try:
                temp[['probeFrames_raw','visible.keys_raw']]
            except:
                temp['probeFrames_raw'] = temp['probe_Frames_raw']
            probeFrame = []
            for ii,row in temp.iterrows():
                if int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 1:
                    probeFrame.append(row['probeFrames_raw'])
                elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 2:
                    probeFrame.append(row['probeFrames_raw'])
                elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 3:
                    probeFrame.append(row['probeFrames_raw'])
                elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 4:
                    probeFrame.append(row['probeFrames_raw'])
            temp['probeFrames'] = probeFrame
            df.append(temp)
        df = pd.concat(df)
    else:
        # legacy behavior: shift probeFrames by -2/-1/+1/+2 per visibility rating
        df = []
        for f in files:
            temp = pd.read_csv(f).dropna()
            temp[['probeFrames_raw','visible.keys_raw']]
            probeFrame = []
            for ii,row in temp.iterrows():
                if int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 1:
                    probeFrame.append(row['probeFrames_raw'] - 2)
                elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 2:
                    probeFrame.append(row['probeFrames_raw'] - 1)
                elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 3:
                    probeFrame.append(row['probeFrames_raw'] + 1)
                elif int(re.findall(r'\d',row['visible.keys_raw'])[0]) == 4:
                    probeFrame.append(row['probeFrames_raw'] + 2)
            temp['probeFrames'] = probeFrame
            df.append(temp)
        df = pd.concat(df)
    df['probeFrames'] = df['probeFrames'].apply(str2int)
    df = df[df['probeFrames'] != 999]
    # final pass: accuracy and frame statistics per visibility rating
    results = []
    for vis,df_sub in df.groupby(['visible.keys_raw']):
        corrects = df_sub['response.corr_raw'].sum() / df_sub.shape[0]
        try:
            print(f"vis = {vis},N = {df_sub.shape[0]},mean frames = {np.mean(df_sub['probeFrames']):.2f} +/- {np.std(df_sub['probeFrames']):.2f}\np(correct) = {corrects:.4f}")
            empty_temp += f"vis = {vis},N = {df_sub.shape[0]},mean frames = {np.mean(df_sub['probeFrames']):.2f} +/- {np.std(df_sub['probeFrames']):.2f}\np(correct) = {corrects:.4f}\n"
            empty_temp += f"RT = {np.mean(df_sub['visible.rt_raw']):.3f} +/- {np.std(df_sub['visible.rt_raw']):.3f}\n"
        except:
            print("vis = {},mean frames = {:.2f} +/- {:.2f}".format(
                    vis,np.mean(df_sub['probeFrames']),np.std(df_sub['probeFrames'])))
        results.append([vis,np.mean(df_sub['probeFrames']),np.std(df_sub['probeFrames'])])
    return results,empty_temp
def preprocess_behavioral_file(f):
    """
    Load one behavioral csv, coerce its coded columns to numbers and sort
    the rows by trial order.

    f: path of the behavioral csv file

    Returns the cleaned, order-sorted DataFrame.
    """
    frame = read_behavorial_file(f)
    # the raw columns are string-coded; extract their numeric values
    for column in ('probeFrames_raw',
                   'response.keys_raw',
                   'visible.keys_raw'):
        frame[column] = frame[column].apply(str2int)
    return frame.sort_values(['order'])
def read_behavorial_file(f):
    """
    Read a behavioral csv file, dropping the 12 metadata rows appended
    at the end of every file.
    """
    return pd.read_csv(f).iloc[:-12,:]
def preload(f):
    """
    Read only the 12 trailing metadata rows (first two columns) of a
    behavioral csv file.
    """
    return pd.read_csv(f).iloc[-12:,:2]
def extract(x):
    """
    Pull the first single digit out of ``x`` as an int.

    Parameters
    ----------
    x : str (expected); any other value falls back to the sentinel

    Returns
    -------
    int
        The first digit character found in ``x``, or the sentinel 99 when
        there is no digit or ``x`` is not a string (e.g. NaN).
    """
    try:
        return int(re.findall(r'\d', x)[0])
    # IndexError: string without digits; TypeError: non-string input.
    # These are the only failures the body can produce, so the old bare
    # `except:` is narrowed without changing behavior.
    except (IndexError, TypeError):
        return int(99)
#def extract_session_run_from_MRI(x):
# temp = re.findall(r'\d+',x)
# session = temp[1]
# if int(session) == 7:
# session = '1'
# run = temp[-1]
# return session,run
#def check_behaviral_data_session_block(x):
# temp = preload(x)
# temp.index = temp['category']
# temp = temp.T
# session = int(temp['session'].values[-1])
# block = int(temp['block'].values[-1])
# return session,block
#def compare_match(behavorial_file_name,session,block):
# behav_session,behav_block = check_behaviral_data_session_block(behavorial_file_name)
# if np.logical_and(behav_session == session, behav_block == block):
# return True
# else:
# return False
def add_track(df_sub):
    """
    Collapse a group of rows into its first row, annotated with the group
    size ('n_volume') and a string of the member indices ('time_indices').

    Note: with more than one row, each index is offset by +10 before being
    joined with '+'; a single-row group keeps its raw index (historical
    asymmetry preserved on purpose).
    """
    n_rows = df_sub.shape[0]
    indices = df_sub.index.values
    if len(indices) > 1:
        track = '+'.join(str(item + 10) for item in indices)
    else:
        track = str(indices[0])
    collapsed = df_sub.iloc[0,:].to_frame().T
    collapsed['n_volume'] = n_rows
    collapsed['time_indices'] = track
    return collapsed
def groupby_average(fmri,df,groupby = ['trials']):
    """
    Average the BOLD rows belonging to each group and keep one tracking
    row (via add_track) per group.

    fmri: array indexable by the DataFrame row indices
    df: DataFrame whose index selects rows of `fmri`
    groupby: column(s) defining the groups

    Returns (BOLD_average, df_average).
    """
    averaged_bold = np.array([np.mean(fmri[group.index], 0)
                              for _, group in df.groupby(groupby)])
    averaged_df = pd.concat([add_track(group)
                             for _, group in df.groupby(groupby)])
    return averaged_bold, averaged_df
def get_brightness_threshold(thresh):
    """Return 75% of each median intensity — SUSAN's brightness threshold."""
    return [val * 0.75 for val in thresh]
def get_brightness_threshold_double(thresh):
    """Return twice the SUSAN brightness threshold (2 x 75% of each median)."""
    return [(2 * 0.75) * val for val in thresh]
def cartesian_product(fwhms, in_files, usans, btthresh):
    """
    Expand the SUSAN smoothing inputs into four aligned lists, one element
    per (input, fwhm) combination, so a MapNode can iterate them in lockstep.
    """
    from nipype.utils.filemanip import ensure_list
    # normalize scalar arguments into lists
    in_files = ensure_list(in_files)
    if isinstance(fwhms, (int, float)):
        fwhms = [fwhms]
    # flatten every pairing in input-major order
    cart_in_file = [one_file for one_file in in_files for _ in fwhms]
    cart_fwhm = [one_fwhm for _ in in_files for one_fwhm in fwhms]
    cart_usans = [one_usan for one_usan in usans for _ in fwhms]
    cart_btthresh = [one_thresh for one_thresh in btthresh for _ in fwhms]
    return cart_in_file, cart_fwhm, cart_usans, cart_btthresh
def getusans(x):
    """Build SUSAN's `usans` argument: one [(image, half_median)] pair per run."""
    return [[(val[0], val[1] * 0.5)] for val in x]
def create_fsl_FEAT_workflow_func(whichrun = 0,
                                  whichvol = 'middle',
                                  workflow_name = 'nipype_mimic_FEAT',
                                  first_run = True,
                                  func_data_file = 'temp',
                                  fwhm = 3):
    """
    Build a nipype workflow that mimics FSL FEAT functional preprocessing.

    Works with fsl-5.0.9 and fsl-5.0.11, but not fsl-6.0.0

    Inputs
    -------------------
    whichrun: index of the run whose mean volume seeds the mask/mean steps
    whichvol: which volume of the run to use as motion reference (e.g. 'middle')
    workflow_name: name given to the nipype workflow
    first_run: True for the first run (a reference volume is extracted here);
        otherwise the path of an already-extracted reference image
    func_data_file: path of the 4D functional data file
    fwhm: SUSAN smoothing kernel FWHM in mm
        (fix: previously accepted but ignored — the inputspec was hard-coded to 3)

    Output
    -------------------
    preproc: the constructed nipype workflow
    MC_dir: directory that will receive motion-correction outputs
    output_dir: directory that will receive the workflow outputs
    """
    from nipype.workflows.fmri.fsl import preprocess
    from nipype.interfaces import fsl
    from nipype.interfaces import utility as util
    from nipype.pipeline import engine as pe
    """
    Setup some functions and hyperparameters
    """
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    pickrun = preprocess.pickrun
    pickvol = preprocess.pickvol
    getthreshop = preprocess.getthreshop
    getmeanscale = preprocess.getmeanscale
    # chooseindex = preprocess.chooseindex
    """
    Start constructing the workflow graph
    """
    preproc = pe.Workflow(name = workflow_name)
    """
    Initialize the input and output spaces
    """
    inputnode = pe.Node(
            interface = util.IdentityInterface(fields = ['func',
                                                         'fwhm',
                                                         'anat']),
            name = 'inputspec')
    outputnode = pe.Node(
            interface = util.IdentityInterface(fields = ['reference',
                                                         'motion_parameters',
                                                         'realigned_files',
                                                         'motion_plots',
                                                         'mask',
                                                         'smoothed_files',
                                                         'mean']),
            name = 'outputspec')
    """
    first step: convert Images to float values
    """
    img2float = pe.MapNode(
            interface = fsl.ImageMaths(
                    out_data_type = 'float',
                    op_string = '',
                    suffix = '_dtype'),
            iterfield = ['in_file'],
            name = 'img2float')
    preproc.connect(inputnode,'func',
                    img2float,'in_file')
    """
    delete first 10 volumes
    """
    develVolume = pe.MapNode(
            interface = fsl.ExtractROI(t_min = 10,
                                       t_size = 508),
            iterfield = ['in_file'],
            name = 'remove_volumes')
    preproc.connect(img2float, 'out_file',
                    develVolume, 'in_file')
    if first_run == True:
        """
        extract example fMRI volume: middle one
        """
        extract_ref = pe.MapNode(
                interface = fsl.ExtractROI(t_size = 1,),
                iterfield = ['in_file'],
                name = 'extractref')
        # connect to the deleteVolume node to get the data
        preproc.connect(develVolume,'roi_file',
                        extract_ref,'in_file')
        # connect to the deleteVolume node again to perform the extraction
        preproc.connect(develVolume,('roi_file',pickvol,0,whichvol),
                        extract_ref,'t_min')
        # connect to the output node to save the reference volume
        preproc.connect(extract_ref,'roi_file',
                        outputnode, 'reference')
    if first_run == True:
        """
        Realign the functional runs to the reference (`whichvol` volume of first run)
        """
        motion_correct = pe.MapNode(
                interface = fsl.MCFLIRT(save_mats = True,
                                        save_plots = True,
                                        save_rms = True,
                                        stats_imgs = True,
                                        interpolation = 'spline'),
                iterfield = ['in_file','ref_file'],
                name = 'MCFlirt',
                )
        # connect to the develVolume node to get the input data
        preproc.connect(develVolume, 'roi_file',
                        motion_correct, 'in_file',)
        ######################################################################################
        ################# the part where we replace the actual reference image if exists ####
        ######################################################################################
        # connect to the develVolume node to get the reference
        preproc.connect(extract_ref, 'roi_file',
                        motion_correct, 'ref_file')
        ######################################################################################
        # connect to the output node to save the motion correction parameters
        preproc.connect(motion_correct, 'par_file',
                        outputnode, 'motion_parameters')
        # connect to the output node to save the other files
        preproc.connect(motion_correct, 'out_file',
                        outputnode, 'realigned_files')
    else:
        """
        Realign the functional runs to the reference (`whichvol` volume of first run)
        """
        motion_correct = pe.MapNode(
                interface = fsl.MCFLIRT(ref_file = first_run,
                                        save_mats = True,
                                        save_plots = True,
                                        save_rms = True,
                                        stats_imgs = True,
                                        interpolation = 'spline'),
                iterfield = ['in_file','ref_file'],
                name = 'MCFlirt',
                )
        # connect to the develVolume node to get the input data
        preproc.connect(develVolume, 'roi_file',
                        motion_correct, 'in_file',)
        # connect to the output node to save the motion correction parameters
        preproc.connect(motion_correct, 'par_file',
                        outputnode, 'motion_parameters')
        # connect to the output node to save the other files
        preproc.connect(motion_correct, 'out_file',
                        outputnode, 'realigned_files')
    """
    plot the estimated motion parameters
    """
    plot_motion = pe.MapNode(
            interface = fsl.PlotMotionParams(in_source = 'fsl'),
            iterfield = ['in_file'],
            name = 'plot_motion',
            )
    plot_motion.iterables = ('plot_type',['rotations',
                                          'translations',
                                          'displacement'])
    preproc.connect(motion_correct, 'par_file',
                    plot_motion, 'in_file')
    preproc.connect(plot_motion, 'out_file',
                    outputnode, 'motion_plots')
    """
    extract the mean volume of the first functional run
    """
    meanfunc = pe.Node(
            interface = fsl.ImageMaths(op_string = '-Tmean',
                                       suffix = '_mean',),
            name = 'meanfunc')
    preproc.connect(motion_correct, ('out_file',pickrun,whichrun),
                    meanfunc, 'in_file')
    """
    strip the skull from the mean functional to generate a mask
    """
    meanfuncmask = pe.Node(
            interface = fsl.BET(mask = True,
                                no_output = True,
                                frac = 0.3,
                                surfaces = True,),
            name = 'bet2_mean_func')
    preproc.connect(meanfunc, 'out_file',
                    meanfuncmask, 'in_file')
    """
    Mask the motion corrected functional data with the mask to create the masked (bet) motion corrected functional data
    """
    maskfunc = pe.MapNode(
            interface = fsl.ImageMaths(suffix = '_bet',
                                       op_string = '-mas'),
            iterfield = ['in_file'],
            name = 'maskfunc')
    preproc.connect(motion_correct, 'out_file',
                    maskfunc, 'in_file')
    preproc.connect(meanfuncmask, 'mask_file',
                    maskfunc, 'in_file2')
    """
    determine the 2nd and 98th percentiles of each functional run
    """
    getthreshold = pe.MapNode(
            interface = fsl.ImageStats(op_string = '-p 2 -p 98'),
            iterfield = ['in_file'],
            name = 'getthreshold')
    preproc.connect(maskfunc, 'out_file',
                    getthreshold, 'in_file')
    """
    threshold the functional data at 10% of the 98th percentile
    """
    threshold = pe.MapNode(
            interface = fsl.ImageMaths(out_data_type = 'char',
                                       suffix = '_thresh',
                                       op_string = '-Tmin -bin'),
            iterfield = ['in_file','op_string'],
            name = 'tresholding')
    preproc.connect(maskfunc, 'out_file',
                    threshold,'in_file')
    """
    define a function to get 10% of the intensity
    """
    preproc.connect(getthreshold,('out_stat',getthreshop),
                    threshold, 'op_string')
    """
    Determine the median value of the functional runs using the mask
    """
    medianval = pe.MapNode(
            interface = fsl.ImageStats(op_string = '-k %s -p 50'),
            iterfield = ['in_file','mask_file'],
            name = 'cal_intensity_scale_factor')
    preproc.connect(motion_correct, 'out_file',
                    medianval, 'in_file')
    preproc.connect(threshold, 'out_file',
                    medianval, 'mask_file')
    """
    dilate the mask
    """
    dilatemask = pe.MapNode(
            interface = fsl.ImageMaths(suffix = '_dil',
                                       op_string = '-dilF'),
            iterfield = ['in_file'],
            name = 'dilatemask')
    preproc.connect(threshold, 'out_file',
                    dilatemask, 'in_file')
    preproc.connect(dilatemask, 'out_file',
                    outputnode, 'mask')
    """
    mask the motion corrected functional runs with the dilated mask
    """
    dilateMask_MCed = pe.MapNode(
            interface = fsl.ImageMaths(suffix = '_mask',
                                       op_string = '-mas'),
            iterfield = ['in_file','in_file2'],
            name = 'dilateMask_MCed')
    preproc.connect(motion_correct, 'out_file',
                    dilateMask_MCed, 'in_file',)
    preproc.connect(dilatemask, 'out_file',
                    dilateMask_MCed, 'in_file2')
    """
    We now take this functional data that is motion corrected, high pass filtered, and
    create a "mean_func" image that is the mean across time (Tmean)
    """
    meanfunc2 = pe.MapNode(
            interface = fsl.ImageMaths(suffix = '_mean',
                                       op_string = '-Tmean',),
            iterfield = ['in_file'],
            name = 'meanfunc2')
    preproc.connect(dilateMask_MCed, 'out_file',
                    meanfunc2, 'in_file')
    """
    smooth each run using SUSAN with the brightness threshold set to
    75% of the median value for each run and a mask constituing the
    mean functional
    """
    merge = pe.Node(
            interface = util.Merge(2, axis = 'hstack'),
            name = 'merge')
    preproc.connect(meanfunc2, 'out_file',
                    merge, 'in1')
    preproc.connect(medianval,('out_stat',get_brightness_threshold_double),
                    merge, 'in2')
    smooth = pe.MapNode(
            interface = fsl.SUSAN(dimension = 3,
                                  use_median = True),
            iterfield = ['in_file',
                         'brightness_threshold',
                         'fwhm',
                         'usans'],
            name = 'susan_smooth')
    preproc.connect(dilateMask_MCed, 'out_file',
                    smooth, 'in_file')
    preproc.connect(medianval, ('out_stat',get_brightness_threshold),
                    smooth, 'brightness_threshold')
    preproc.connect(inputnode, 'fwhm',
                    smooth, 'fwhm')
    preproc.connect(merge, ('out',getusans),
                    smooth, 'usans')
    """
    mask the smoothed data with the dilated mask
    """
    maskfunc3 = pe.MapNode(
            interface = fsl.ImageMaths(suffix = '_mask',
                                       op_string = '-mas'),
            iterfield = ['in_file','in_file2'],
            name = 'dilateMask_smoothed')
    # connect the output of the susam smooth component to the maskfunc3 node
    preproc.connect(smooth, 'smoothed_file',
                    maskfunc3, 'in_file')
    # connect the output of the dilated mask to the maskfunc3 node
    preproc.connect(dilatemask, 'out_file',
                    maskfunc3, 'in_file2')
    """
    scale the median value of the run is set to 10000
    """
    meanscale = pe.MapNode(
            interface = fsl.ImageMaths(suffix = '_intnorm'),
            iterfield = ['in_file','op_string'],
            name = 'meanscale')
    preproc.connect(maskfunc3, 'out_file',
                    meanscale, 'in_file')
    preproc.connect(meanscale, 'out_file',
                    outputnode,'smoothed_files')
    """
    define a function to get the scaling factor for intensity normalization
    """
    preproc.connect(medianval,('out_stat',getmeanscale),
                    meanscale,'op_string')
    """
    generate a mean functional image from the first run
    should this be the 'mean.nii.gz' we will use in the future?
    """
    meanfunc3 = pe.MapNode(
            interface = fsl.ImageMaths(suffix = '_mean',
                                       op_string = '-Tmean',),
            iterfield = ['in_file'],
            name = 'gen_mean_func_img')
    preproc.connect(meanscale, 'out_file',
                    meanfunc3, 'in_file')
    preproc.connect(meanfunc3, 'out_file',
                    outputnode,'mean')
    # initialize some of the input files
    preproc.inputs.inputspec.func = os.path.abspath(func_data_file)
    # fix: honor the `fwhm` argument (this line used to hard-code 3,
    # silently ignoring the parameter); the default of 3 keeps old behavior
    preproc.inputs.inputspec.fwhm = fwhm
    preproc.base_dir = os.path.abspath('/'.join(
            func_data_file.split('/')[:-1]))
    output_dir = os.path.abspath(os.path.join(
            preproc.base_dir,
            'outputs',
            'func'))
    MC_dir = os.path.join(output_dir,'MC')
    for directories in [output_dir,MC_dir]:
        if not os.path.exists(directories):
            os.makedirs(directories)
    # initialize all the output files
    if first_run == True:
        preproc.inputs.extractref.roi_file = os.path.abspath(os.path.join(
                output_dir,'example_func.nii.gz'))
    preproc.inputs.dilatemask.out_file = os.path.abspath(os.path.join(
            output_dir,'mask.nii.gz'))
    preproc.inputs.meanscale.out_file = os.path.abspath(os.path.join(
            output_dir,'prefiltered_func.nii.gz'))
    preproc.inputs.gen_mean_func_img.out_file = os.path.abspath(os.path.join(
            output_dir,'mean_func.nii.gz'))
    return preproc,MC_dir,output_dir
def create_registration_workflow(
        anat_brain,
        anat_head,
        example_func,
        standard_brain,
        standard_head,
        standard_mask,
        workflow_name = 'registration',
        output_dir = 'temp'):
    """Build a nipype Workflow reproducing FSL FEAT's registration stage.

    Registers the functional reference image to the subject's anatomical
    (FLIRT, dof 7), the anatomical to the MNI standard (FLIRT dof 12 +
    FNIRT non-linear warp), derives all composed/inverted transforms, and
    pins every node's output file name so results land in ``output_dir``.

    Parameters
    ----------
    anat_brain : str
        skull-stripped anatomical volume ("highres")
    anat_head : str
        whole-head anatomical volume ("highres_head")
    example_func : str
        functional reference volume
    standard_brain, standard_head, standard_mask : str
        MNI template brain / head / dilated brain mask
    workflow_name : str
        NOTE(review): accepted but ignored — the workflow is always
        named 'registration' below; confirm whether ``name = workflow_name``
        was intended.
    output_dir : str
        directory used as the workflow base_dir and for all output files

    Returns
    -------
    nipype.pipeline.engine.Workflow
        the configured (not yet run) registration workflow
    """
    from nipype.interfaces import fsl
    from nipype.interfaces import utility as util
    from nipype.pipeline import engine as pe
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    registration = pe.Workflow(name = 'registration')
    inputnode = pe.Node(
            interface = util.IdentityInterface(
            fields = [
                    'highres', # anat_brain
                    'highres_head', # anat_head
                    'example_func',
                    'standard', # standard_brain
                    'standard_head',
                    'standard_mask'
                    ]),
            name = 'inputspec')
    outputnode = pe.Node(
            interface = util.IdentityInterface(
            fields = ['example_func2highres_nii_gz',
                      'example_func2highres_mat',
                      'linear_example_func2highres_log',
                      'highres2example_func_mat',
                      'highres2standard_linear_nii_gz',
                      'highres2standard_mat',
                      'linear_highres2standard_log',
                      'highres2standard_nii_gz',
                      'highres2standard_warp_nii_gz',
                      'highres2standard_head_nii_gz',
                      # 'highres2standard_apply_warp_nii_gz',
                      'highres2highres_jac_nii_gz',
                      'nonlinear_highres2standard_log',
                      'highres2standard_nii_gz', # NOTE(review): duplicate of the entry above; harmless but one can be removed
                      'standard2highres_mat',
                      'example_func2standard_mat',
                      'example_func2standard_warp_nii_gz',
                      'example_func2standard_nii_gz',
                      'standard2example_func_mat',
                      ]),
            name = 'outputspec')
    """
    fslmaths /bcbl/home/public/Consciousness/uncon_feat/data/MRI/sub-01/anat/sub-01-T1W_mprage_sag_p2_1iso_MGH_day_6_nipy_brain highres
    fslmaths /bcbl/home/public/Consciousness/uncon_feat/data/MRI/sub-01/anat/sub-01-T1W_mprage_sag_p2_1iso_MGH_day_6_nipy_brain highres_head
    fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm_brain standard
    fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm standard_head
    fslmaths /opt/fsl/fsl-5.0.9/fsl/data/standard/MNI152_T1_2mm_brain_mask_dil standard_mask
    """
    # skip (the fslmaths copy step quoted above is not reproduced as workflow nodes)
    """
    /opt/fsl/fsl-5.0.10/fsl/bin/flirt
    -in example_func
    -ref highres
    -out example_func2highres
    -omat example_func2highres.mat
    -cost corratio
    -dof 7
    -searchrx -180 180
    -searchry -180 180
    -searchrz -180 180
    -interp trilinear
    """
    # linear registration: example_func -> highres (7 dof, as in the shell
    # reference quoted above)
    linear_example_func2highres = pe.MapNode(
            interface = fsl.FLIRT(cost = 'corratio',
                                  interp = 'trilinear',
                                  dof = 7,
                                  save_log = True,
                                  searchr_x = [-180, 180],
                                  searchr_y = [-180, 180],
                                  searchr_z = [-180, 180],),
            iterfield = ['in_file','reference'],
            name = 'linear_example_func2highres')
    registration.connect(inputnode, 'example_func',
                         linear_example_func2highres, 'in_file')
    registration.connect(inputnode, 'highres',
                         linear_example_func2highres, 'reference')
    registration.connect(linear_example_func2highres, 'out_file',
                         outputnode, 'example_func2highres_nii_gz')
    registration.connect(linear_example_func2highres, 'out_matrix_file',
                         outputnode, 'example_func2highres_mat')
    registration.connect(linear_example_func2highres, 'out_log',
                         outputnode, 'linear_example_func2highres_log')
    """
    /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
    -inverse -omat highres2example_func.mat example_func2highres.mat
    """
    # invert the affine: highres -> example_func
    get_highres2example_func = pe.MapNode(
            interface = fsl.ConvertXFM(invert_xfm = True),
            iterfield = ['in_file'],
            name = 'get_highres2example_func')
    registration.connect(linear_example_func2highres,'out_matrix_file',
                         get_highres2example_func,'in_file')
    registration.connect(get_highres2example_func,'out_file',
                         outputnode,'highres2example_func_mat')
    """
    /opt/fsl/fsl-5.0.10/fsl/bin/flirt
    -in highres
    -ref standard
    -out highres2standard
    -omat highres2standard.mat
    -cost corratio
    -dof 12
    -searchrx -180 180
    -searchry -180 180
    -searchrz -180 180
    -interp trilinear
    """
    # linear registration: highres -> standard (12 dof)
    linear_highres2standard = pe.MapNode(
            interface = fsl.FLIRT(cost = 'corratio',
                                interp = 'trilinear',
                                dof = 12,
                                save_log = True,
                                searchr_x = [-180, 180],
                                searchr_y = [-180, 180],
                                searchr_z = [-180, 180],),
            iterfield = ['in_file','reference'],
            name = 'linear_highres2standard')
    registration.connect(inputnode,'highres',
                         linear_highres2standard,'in_file')
    registration.connect(inputnode,'standard',
                         linear_highres2standard,'reference',)
    registration.connect(linear_highres2standard,'out_file',
                         outputnode,'highres2standard_linear_nii_gz')
    registration.connect(linear_highres2standard,'out_matrix_file',
                         outputnode,'highres2standard_mat')
    registration.connect(linear_highres2standard,'out_log',
                         outputnode,'linear_highres2standard_log')
    """
    /opt/fsl/fsl-5.0.10/fsl/bin/fnirt
    --iout=highres2standard_head
    --in=highres_head
    --aff=highres2standard.mat
    --cout=highres2standard_warp
    --iout=highres2standard
    --jout=highres2highres_jac
    --config=T1_2_MNI152_2mm
    --ref=standard_head
    --refmask=standard_mask
    --warpres=10,10,10
    """
    # non-linear refinement of highres -> standard with FNIRT
    nonlinear_highres2standard = pe.MapNode(
            interface = fsl.FNIRT(warp_resolution = (10,10,10),
                                  config_file = "T1_2_MNI152_2mm"),
            iterfield = ['in_file','ref_file','affine_file','refmask_file'],
            name = 'nonlinear_highres2standard')
    # -- iout
    registration.connect(nonlinear_highres2standard,'warped_file',
                         outputnode,'highres2standard_head_nii_gz')
    # --in
    # NOTE(review): the shell reference above uses highres_head as --in, but
    # this connects the brain-extracted 'highres' — confirm which is intended.
    registration.connect(inputnode,'highres',
                         nonlinear_highres2standard,'in_file')
    # --aff
    registration.connect(linear_highres2standard,'out_matrix_file',
                         nonlinear_highres2standard,'affine_file')
    # --cout
    registration.connect(nonlinear_highres2standard,'fieldcoeff_file',
                         outputnode,'highres2standard_warp_nii_gz')
    # --jout
    registration.connect(nonlinear_highres2standard,'jacobian_file',
                         outputnode,'highres2highres_jac_nii_gz')
    # --ref
    registration.connect(inputnode,'standard_head',
                         nonlinear_highres2standard,'ref_file',)
    # --refmask
    registration.connect(inputnode,'standard_mask',
                         nonlinear_highres2standard,'refmask_file')
    # log
    registration.connect(nonlinear_highres2standard,'log_file',
                         outputnode,'nonlinear_highres2standard_log')
    """
    /opt/fsl/fsl-5.0.10/fsl/bin/applywarp
    -i highres
    -r standard
    -o highres2standard
    -w highres2standard_warp
    """
    # resample highres into standard space using the FNIRT field coefficients
    warp_highres2standard = pe.MapNode(
            interface = fsl.ApplyWarp(),
            iterfield = ['in_file','ref_file','field_file'],
            name = 'warp_highres2standard')
    registration.connect(inputnode,'highres',
                         warp_highres2standard,'in_file')
    registration.connect(inputnode,'standard',
                         warp_highres2standard,'ref_file')
    registration.connect(warp_highres2standard,'out_file',
                         outputnode,'highres2standard_nii_gz')
    registration.connect(nonlinear_highres2standard,'fieldcoeff_file',
                         warp_highres2standard,'field_file')
    """
    /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
    -inverse -omat standard2highres.mat highres2standard.mat
    """
    # invert the affine: standard -> highres
    get_standard2highres = pe.MapNode(
            interface = fsl.ConvertXFM(invert_xfm = True),
            iterfield = ['in_file'],
            name = 'get_standard2highres')
    registration.connect(linear_highres2standard,'out_matrix_file',
                         get_standard2highres,'in_file')
    registration.connect(get_standard2highres,'out_file',
                         outputnode,'standard2highres_mat')
    """
    /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
    -omat example_func2standard.mat -concat highres2standard.mat example_func2highres.mat
    """
    # concatenate the two affines: example_func -> standard
    # NOTE(review): node name misspells 'example'; kept as-is because the
    # out_file assignment near the end of this function references it by name.
    get_exmaple_func2standard = pe.MapNode(
            interface = fsl.ConvertXFM(concat_xfm = True),
            iterfield = ['in_file','in_file2'],
            name = 'get_exmaple_func2standard')
    registration.connect(linear_example_func2highres, 'out_matrix_file',
                         get_exmaple_func2standard,'in_file')
    registration.connect(linear_highres2standard,'out_matrix_file',
                         get_exmaple_func2standard,'in_file2')
    registration.connect(get_exmaple_func2standard,'out_file',
                         outputnode,'example_func2standard_mat')
    """
    /opt/fsl/fsl-5.0.10/fsl/bin/convertwarp
    --ref=standard
    --premat=example_func2highres.mat
    --warp1=highres2standard_warp
    --out=example_func2standard_warp
    """
    # compose premat + non-linear warp into a single example_func -> standard warp
    convertwarp_example2standard = pe.MapNode(
            interface = fsl.ConvertWarp(),
            iterfield = ['reference','premat','warp1'],
            name = 'convertwarp_example2standard')
    registration.connect(inputnode,'standard',
                         convertwarp_example2standard,'reference')
    registration.connect(linear_example_func2highres,'out_matrix_file',
                         convertwarp_example2standard,'premat')
    registration.connect(nonlinear_highres2standard,'fieldcoeff_file',
                         convertwarp_example2standard,'warp1')
    registration.connect(convertwarp_example2standard,'out_file',
                         outputnode,'example_func2standard_warp_nii_gz')
    """
    /opt/fsl/fsl-5.0.10/fsl/bin/applywarp
    --ref=standard
    --in=example_func
    --out=example_func2standard
    --warp=example_func2standard_warp
    """
    # resample example_func into standard space with the composed warp
    warp_example2stand = pe.MapNode(
            interface = fsl.ApplyWarp(),
            iterfield = ['ref_file','in_file','field_file'],
            name = 'warp_example2stand')
    registration.connect(inputnode,'standard',
                         warp_example2stand,'ref_file')
    registration.connect(inputnode,'example_func',
                         warp_example2stand,'in_file')
    registration.connect(warp_example2stand,'out_file',
                         outputnode,'example_func2standard_nii_gz')
    registration.connect(convertwarp_example2standard,'out_file',
                         warp_example2stand,'field_file')
    """
    /opt/fsl/fsl-5.0.10/fsl/bin/convert_xfm
    -inverse -omat standard2example_func.mat example_func2standard.mat
    """
    # invert the concatenated affine: standard -> example_func
    get_standard2example_func = pe.MapNode(
            interface = fsl.ConvertXFM(invert_xfm = True),
            iterfield = ['in_file'],
            name = 'get_standard2example_func')
    registration.connect(get_exmaple_func2standard,'out_file',
                         get_standard2example_func,'in_file')
    registration.connect(get_standard2example_func,'out_file',
                         outputnode,'standard2example_func_mat')
    # wire up the concrete input files
    registration.base_dir = output_dir
    registration.inputs.inputspec.highres = anat_brain
    registration.inputs.inputspec.highres_head= anat_head
    registration.inputs.inputspec.example_func = example_func
    registration.inputs.inputspec.standard = standard_brain
    registration.inputs.inputspec.standard_head = standard_head
    registration.inputs.inputspec.standard_mask = standard_mask
    # define all the output file names with the directory so every product
    # lands directly in output_dir instead of the nodes' working directories
    registration.inputs.linear_example_func2highres.out_file = os.path.abspath(os.path.join(output_dir,
                        'example_func2highres.nii.gz'))
    registration.inputs.linear_example_func2highres.out_matrix_file = os.path.abspath(os.path.join(output_dir,
                        'example_func2highres.mat'))
    registration.inputs.linear_example_func2highres.out_log = os.path.abspath(os.path.join(output_dir,
                        'linear_example_func2highres.log'))
    registration.inputs.get_highres2example_func.out_file = os.path.abspath(os.path.join(output_dir,
                        'highres2example_func.mat'))
    registration.inputs.linear_highres2standard.out_file = os.path.abspath(os.path.join(output_dir,
                        'highres2standard_linear.nii.gz'))
    registration.inputs.linear_highres2standard.out_matrix_file = os.path.abspath(os.path.join(output_dir,
                        'highres2standard.mat'))
    registration.inputs.linear_highres2standard.out_log = os.path.abspath(os.path.join(output_dir,
                        'linear_highres2standard.log'))
    # --iout
    registration.inputs.nonlinear_highres2standard.warped_file = os.path.abspath(os.path.join(output_dir,
                        'highres2standard.nii.gz'))
    # --cout
    registration.inputs.nonlinear_highres2standard.fieldcoeff_file = os.path.abspath(os.path.join(output_dir,
                        'highres2standard_warp.nii.gz'))
    # --jout
    registration.inputs.nonlinear_highres2standard.jacobian_file = os.path.abspath(os.path.join(output_dir,
                        'highres2highres_jac.nii.gz'))
    registration.inputs.nonlinear_highres2standard.log_file = os.path.abspath(os.path.join(output_dir,
                        'nonlinear_highres2standard.log'))
    # NOTE(review): same file name as nonlinear_highres2standard.warped_file
    # above — the applywarp output overwrites the FNIRT --iout image.
    registration.inputs.warp_highres2standard.out_file = os.path.abspath(os.path.join(output_dir,
                        'highres2standard.nii.gz'))
    registration.inputs.get_standard2highres.out_file = os.path.abspath(os.path.join(output_dir,
                        'standard2highres.mat'))
    registration.inputs.get_exmaple_func2standard.out_file = os.path.abspath(os.path.join(output_dir,
                        'example_func2standard.mat'))
    registration.inputs.convertwarp_example2standard.out_file = os.path.abspath(os.path.join(output_dir,
                        'example_func2standard_warp.nii.gz'))
    registration.inputs.warp_example2stand.out_file = os.path.abspath(os.path.join(output_dir,
                        'example_func2standard.nii.gz'))
    registration.inputs.get_standard2example_func.out_file = os.path.abspath(os.path.join(output_dir,
                        'standard2example_func.mat'))
    return registration
def _create_registration_workflow(anat_brain,
                                  anat_head,
                                  func_ref,
                                  standard_brain,
                                  standard_head,
                                  standard_mask,
                                  output_dir = 'temp'):
    """Run the FEAT-style registration pipeline imperatively (no workflow engine).

    Registers func_ref -> anat (FLIRT dof 7), anat -> standard (FLIRT dof 12
    + FNIRT), derives all inverted/composed transforms, resamples the images
    into standard space and writes the FEAT-style overlay QA plots.

    Parameters
    ----------
    anat_brain : str
        skull-stripped anatomical volume ("highres")
    anat_head : str
        whole-head anatomical volume ("highres_head")
    func_ref : str
        functional reference volume ("example_func")
    standard_brain, standard_head, standard_mask : str
        MNI template brain / head / dilated brain mask
    output_dir : str
        directory where all matrices, warped volumes and plots are written

    Returns
    -------
    None — all results are written to ``output_dir``.
    """
    from nipype.interfaces import fsl

    def _out(name):
        # every product of this pipeline lives directly under output_dir
        return os.path.abspath(os.path.join(output_dir, name))

    # copy the inputs into output_dir under the canonical FEAT names
    # (fslmaths with the default empty op string is a format-normalising copy)
    for src, dst in ((anat_brain,     'highres.nii.gz'),
                     (anat_head,      'highres_head.nii.gz'),
                     (standard_brain, 'standard.nii.gz'),
                     (standard_head,  'standard_head.nii.gz'),
                     (standard_mask,  'standard_mask.nii.gz')):
        copier = fsl.ImageMaths()
        copier.inputs.in_file = src
        copier.inputs.out_file = _out(dst)
        copier.run()

    def _flirt_linear(in_file, reference, out_file, out_mat, out_log, dof):
        # linear registration with the same options FEAT uses:
        # flirt -cost corratio -interp trilinear -searchr[xyz] -180 180
        flt = fsl.FLIRT()
        flt.inputs.in_file = in_file
        flt.inputs.reference = reference
        flt.inputs.out_file = _out(out_file)
        flt.inputs.out_matrix_file = _out(out_mat)
        flt.inputs.out_log = _out(out_log)
        flt.inputs.cost = 'corratio'
        flt.inputs.interp = 'trilinear'
        flt.inputs.searchr_x = [-180, 180]
        flt.inputs.searchr_y = [-180, 180]
        flt.inputs.searchr_z = [-180, 180]
        flt.inputs.dof = dof
        flt.inputs.save_log = True
        flt.run()

    def _invert_xfm(in_mat, out_mat):
        # convert_xfm -inverse -omat <out_mat> <in_mat>   (both in output_dir)
        inv = fsl.ConvertXFM()
        inv.inputs.in_file = _out(in_mat)
        inv.inputs.invert_xfm = True
        inv.inputs.out_file = _out(out_mat)
        inv.run()

    # flirt -in example_func -ref highres -dof 7
    _flirt_linear(func_ref, anat_brain,
                  'example_func2highres.nii.gz',
                  'example_func2highres.mat',
                  'example_func2highres.log',
                  dof = 7)
    # convert_xfm -inverse -omat highres2example_func.mat example_func2highres.mat
    _invert_xfm('example_func2highres.mat', 'highres2example_func.mat')
    # flirt -in highres -ref standard -dof 12
    _flirt_linear(anat_brain, standard_brain,
                  'highres2standard_linear.nii.gz',
                  'highres2standard.mat',
                  'highres2standard.log',
                  dof = 12)
    # fnirt --in=highres_head --aff=highres2standard.mat --config=T1_2_MNI152_2mm
    #       --ref=standard_head --refmask=standard_mask --warpres=10,10,10
    fnirt_mprage = fsl.FNIRT()
    fnirt_mprage.inputs.warp_resolution = (10, 10, 10)
    fnirt_mprage.inputs.warped_file = _out('highres2standard.nii.gz')           # --iout
    fnirt_mprage.inputs.in_file = anat_head                                     # --in
    fnirt_mprage.inputs.affine_file = _out('highres2standard.mat')              # --aff
    fnirt_mprage.inputs.fieldcoeff_file = _out('highres2standard_warp.nii.gz')  # --cout
    fnirt_mprage.inputs.jacobian_file = _out('highres2highres_jac.nii.gz')      # --jout
    fnirt_mprage.inputs.config_file = 'T1_2_MNI152_2mm'                         # --config
    fnirt_mprage.inputs.ref_file = os.path.abspath(standard_head)               # --ref
    fnirt_mprage.inputs.refmask_file = os.path.abspath(standard_mask)           # --refmask
    fnirt_mprage.inputs.log_file = _out('highres2standard.log')
    fnirt_mprage.run()
    # applywarp -i highres -r standard -o highres2standard -w highres2standard_warp
    # (overwrites the FNIRT --iout image with the brain-only resample,
    # exactly as the original sequence did)
    aw = fsl.ApplyWarp()
    aw.inputs.in_file = anat_brain
    aw.inputs.ref_file = os.path.abspath(standard_brain)
    aw.inputs.out_file = _out('highres2standard.nii.gz')
    aw.inputs.field_file = _out('highres2standard_warp.nii.gz')
    aw.run()
    # convert_xfm -inverse -omat standard2highres.mat highres2standard.mat
    _invert_xfm('highres2standard.mat', 'standard2highres.mat')
    # convert_xfm -omat example_func2standard.mat
    #             -concat highres2standard.mat example_func2highres.mat
    concat = fsl.ConvertXFM()
    concat.inputs.in_file = _out('example_func2highres.mat')
    concat.inputs.in_file2 = _out('highres2standard.mat')
    concat.inputs.concat_xfm = True
    concat.inputs.out_file = _out('example_func2standard.mat')
    concat.run()
    # convertwarp --ref=standard --premat=example_func2highres.mat
    #             --warp1=highres2standard_warp --out=example_func2standard_warp
    warputils = fsl.ConvertWarp()
    warputils.inputs.reference = os.path.abspath(standard_brain)
    warputils.inputs.premat = _out('example_func2highres.mat')
    warputils.inputs.warp1 = _out('highres2standard_warp.nii.gz')
    warputils.inputs.out_file = _out('example_func2standard_warp.nii.gz')
    warputils.run()
    # applywarp --ref=standard --in=example_func --warp=example_func2standard_warp
    aw = fsl.ApplyWarp()
    aw.inputs.ref_file = os.path.abspath(standard_brain)
    aw.inputs.in_file = os.path.abspath(func_ref)
    aw.inputs.out_file = _out('example_func2standard.nii.gz')
    aw.inputs.field_file = _out('example_func2standard_warp.nii.gz')
    aw.run()
    # convert_xfm -inverse -omat standard2example_func.mat example_func2standard.mat
    _invert_xfm('example_func2standard.mat', 'standard2example_func.mat')

    ######################
    ###### plotting ######
    def _overlay_plot(moving, fixed):
        # FEAT-style QA image: slicer writes 12 orthogonal slices, pngappend
        # tiles them into a strip; two strips (moving-on-fixed, fixed-on-moving)
        # are stacked vertically, then intermediates are removed.
        # FIX: the original joined the two trailing /bin/rm lines into ONE shell
        # command via .replace("\n"," "), which passed "/bin/rm" itself as an
        # argument to `rm -f`; the third plot also never removed its 1.png
        # intermediate. Commands are now explicitly ';'-separated and cleanup
        # is uniform.
        fsl_bin = '/opt/fsl/fsl-5.0.10/fsl/bin'
        letters = 'abcdefghijkl'
        fracs = ('0.35', '0.45', '0.55', '0.65')
        positions = [(ax, fr) for ax in 'xyz' for fr in fracs]
        slice_opts = ' '.join(f'-{ax} {fr} sl{ch}.png'
                              for (ax, fr), ch in zip(positions, letters))
        strip = ' + '.join(f'sl{ch}.png' for ch in letters)
        cmds = [
            f'{fsl_bin}/slicer {moving} {fixed} -s 2 {slice_opts}',
            f'{fsl_bin}/pngappend {strip} {moving}1.png',
            f'{fsl_bin}/slicer {fixed} {moving} -s 2 {slice_opts}',
            f'{fsl_bin}/pngappend {strip} {moving}2.png',
            f'{fsl_bin}/pngappend {moving}1.png - {moving}2.png {moving}.png',
            f'/bin/rm -f sl?.png {moving}1.png {moving}2.png',
        ]
        os.system(' ; '.join(cmds))

    highres = os.path.abspath(anat_brain)
    standard = os.path.abspath(standard_brain)
    _overlay_plot(_out('example_func2highres'), highres)
    _overlay_plot(_out('example_func2standard'), standard)
    _overlay_plot(_out('highres2standard'), standard)
def create_simple_struc2BOLD(roi,
                             roi_name,
                             preprocessed_functional_dir,
                             output_dir):
    """Build a nipype workflow that projects a structural-space ROI to BOLD space.

    Steps (mirroring the shell pipeline quoted inline below):
      1. FLIRT apply_xfm: resample the ROI into example_func space using the
         precomputed highres2example_func.mat.
      2. Threshold at twice the ROI's 99.6th percentile (computed within the
         functional brain mask) and binarise.
      3. Constrain the binarised ROI to the functional brain mask.

    Parameters
    ----------
    roi : str
        path to the ROI volume in structural (highres) space
    roi_name : str
        ROI file name; the '_fsl.nii.gz' suffix is replaced by '_BOLD.nii.gz'
        for the output file
    preprocessed_functional_dir : str
        directory containing reg/highres2example_func.mat and
        func/{example_func,mask}.nii.gz
    output_dir : str
        directory that receives the projected, binarised ROI

    Returns
    -------
    nipype.pipeline.engine.Workflow
        the configured (not yet run) workflow
    """
    from nipype.interfaces import fsl
    from nipype.pipeline import engine as pe
    from nipype.interfaces import utility as util
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    simple_workflow = pe.Workflow(name = 'struc2BOLD')
    inputnode = pe.Node(interface = util.IdentityInterface(
                        fields = ['flt_in_file',
                                  'flt_in_matrix',
                                  'flt_reference',
                                  'mask']),
                        name = 'inputspec')
    # FIX: the output field was misspelled 'BODL_mask', so the
    # connect(..., 'BOLD_mask') call at the end of this function targeted a
    # field that was never declared on the IdentityInterface.
    outputnode = pe.Node(interface = util.IdentityInterface(
                        fields = ['BOLD_mask']),
                        name = 'outputspec')
    """
    flirt
    -in /export/home/dsoto/dsoto/fmri/$s/sess2/label/$i
    -ref /export/home/dsoto/dsoto/fmri/$s/sess2/run1_prepro1.feat/example_func.nii.gz
    -applyxfm
    -init /export/home/dsoto/dsoto/fmri/$s/sess2/run1_prepro1.feat/reg/highres2example_func.mat
    -out /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i}
    """
    flirt_convert = pe.MapNode(
            interface = fsl.FLIRT(apply_xfm = True),
            iterfield = ['in_file',
                         'reference',
                         'in_matrix_file'],
            name = 'flirt_convert')
    simple_workflow.connect(inputnode, 'flt_in_file',
                            flirt_convert, 'in_file')
    simple_workflow.connect(inputnode, 'flt_reference',
                            flirt_convert, 'reference')
    simple_workflow.connect(inputnode, 'flt_in_matrix',
                            flirt_convert, 'in_matrix_file')
    """
    fslmaths /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i} -mul 2
    -thr `fslstats /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i} -p 99.6`
    -bin /export/home/dsoto/dsoto/fmri/$s/label/BOLD${i}
    """
    def getthreshop(thresh):
        # build one '-mul 2 -thr <p99.6> -bin' op string per mapped input
        return ['-mul 2 -thr %.10f -bin' % (val) for val in thresh]
    getthreshold = pe.MapNode(
            interface = fsl.ImageStats(op_string='-p 99.6'),
            iterfield = ['in_file','mask_file'],
            name = 'getthreshold')
    simple_workflow.connect(flirt_convert, 'out_file',
                            getthreshold, 'in_file')
    simple_workflow.connect(inputnode, 'mask',
                            getthreshold, 'mask_file')
    # the op_string default below is a placeholder; it is overridden per-item
    # by the getthreshop connection
    threshold = pe.MapNode(
            interface = fsl.ImageMaths(
                    suffix = '_thresh',
                    op_string = '-mul 2 -bin'),
            iterfield = ['in_file','op_string'],
            name = 'thresholding')
    simple_workflow.connect(flirt_convert, 'out_file',
                            threshold, 'in_file')
    simple_workflow.connect(getthreshold, ('out_stat',getthreshop),
                            threshold, 'op_string')
    # simple_workflow.connect(threshold,'out_file',outputnode,'BOLD_mask')
    bound_by_mask = pe.MapNode(
            interface = fsl.ImageMaths(
                    suffix = '_mask',
                    op_string = '-mas'),
            iterfield = ['in_file','in_file2'],
            name = 'bound_by_mask')
    simple_workflow.connect(threshold, 'out_file',
                            bound_by_mask, 'in_file')
    simple_workflow.connect(inputnode, 'mask',
                            bound_by_mask, 'in_file2')
    simple_workflow.connect(bound_by_mask, 'out_file',
                            outputnode, 'BOLD_mask')
    # setup inputspecs
    simple_workflow.inputs.inputspec.flt_in_file = roi
    simple_workflow.inputs.inputspec.flt_in_matrix = os.path.abspath(os.path.join(preprocessed_functional_dir,
                                                                                  'reg',
                                                                                  'highres2example_func.mat'))
    simple_workflow.inputs.inputspec.flt_reference = os.path.abspath(os.path.join(preprocessed_functional_dir,
                                                                                  'func',
                                                                                  'example_func.nii.gz'))
    simple_workflow.inputs.inputspec.mask = os.path.abspath(os.path.join(preprocessed_functional_dir,
                                                                         'func',
                                                                         'mask.nii.gz'))
    simple_workflow.inputs.bound_by_mask.out_file = os.path.abspath(os.path.join(output_dir,
                                                                                 roi_name.replace('_fsl.nii.gz',
                                                                                                  '_BOLD.nii.gz')))
    return simple_workflow
def registration_plotting(output_dir,
                          anat_brain,
                          standard_brain):
    """Write FEAT-style registration-QA overlay images into ``output_dir``.

    Expects example_func2highres, example_func2standard_warp and
    highres2standard volumes to already exist in ``output_dir`` (as produced
    by the registration workflow above).

    Parameters
    ----------
    output_dir : str
        directory holding the registered volumes; plots are written here too
    anat_brain : str
        skull-stripped anatomical volume ("highres")
    standard_brain : str
        MNI template brain

    Returns
    -------
    None — PNG files are written next to the registered volumes.
    """
    ######################
    ###### plotting ######
    try:
        highres = os.path.abspath(anat_brain)
        standard = os.path.abspath(standard_brain)
        # NOTE(review): the "example_func2standard" plot is generated from the
        # *_warp volume (the warp field image), not the resampled functional —
        # confirm this is intentional.
        pairs = [
            (os.path.abspath(os.path.join(output_dir, 'example_func2highres')),
             highres),
            (os.path.abspath(os.path.join(output_dir, 'example_func2standard_warp')),
             standard),
            (os.path.abspath(os.path.join(output_dir, 'highres2standard')),
             standard),
        ]
        # FEAT-style QA image: slicer writes 12 orthogonal slices, pngappend
        # tiles them into a strip; two strips (moving-on-fixed, fixed-on-moving)
        # are stacked vertically, then the intermediates are removed.
        # FIX: the original joined its two trailing /bin/rm lines into one shell
        # command via .replace("\n"," "), passing "/bin/rm" itself as an extra
        # argument to `rm -f`; the third plot also never removed its 1.png
        # intermediate. Commands are now explicitly ';'-separated and cleanup
        # is uniform.
        fsl_bin = '/opt/fsl/fsl-5.0.10/fsl/bin'
        letters = 'abcdefghijkl'
        fracs = ('0.35', '0.45', '0.55', '0.65')
        positions = [(ax, fr) for ax in 'xyz' for fr in fracs]
        slice_opts = ' '.join(f'-{ax} {fr} sl{ch}.png'
                              for (ax, fr), ch in zip(positions, letters))
        strip = ' + '.join(f'sl{ch}.png' for ch in letters)
        for moving, fixed in pairs:
            cmds = [
                f'{fsl_bin}/slicer {moving} {fixed} -s 2 {slice_opts}',
                f'{fsl_bin}/pngappend {strip} {moving}1.png',
                f'{fsl_bin}/slicer {fixed} {moving} -s 2 {slice_opts}',
                f'{fsl_bin}/pngappend {strip} {moving}2.png',
                f'{fsl_bin}/pngappend {moving}1.png - {moving}2.png {moving}.png',
                f'/bin/rm -f sl?.png {moving}1.png {moving}2.png',
            ]
            os.system(' ; '.join(cmds))
    except Exception as err:
        # FIX: was a bare `except:` printing a misleading "update your python"
        # hint (an f-string SyntaxError on py2 would occur at import time and
        # could never reach this handler); report the actual failure instead
        # of swallowing it silently.
        print(f'registration plotting failed: {err}')
def create_highpass_filter_workflow(workflow_name = 'highpassfiler',
                                    HP_freq = 60,
                                    TR = 0.85):
    """Build a nipype workflow that temporally high-pass filters ICA-cleaned fMRI data.

    Mirrors the FSL FEAT preprocessing chain: cast to float, build a dilated
    brain mask from robust intensity percentiles, grand-mean scale by the
    in-mask median, high-pass filter with ``fslmaths -bptf``, then add the
    temporal mean back (``-bptf`` removes it).

    Parameters
    ----------
    workflow_name : name assigned to the nipype Workflow
    HP_freq : high-pass cutoff (FSL convention); the -bptf sigma in volumes
        is HP_freq / 2 / TR
    TR : repetition time of the acquisition, in seconds

    Returns
    -------
    nipype Workflow exposing ``inputspec.ICAed_file`` ->
    ``outputspec.filtered_file``
    """
    from nipype.workflows.fmri.fsl import preprocess
    from nipype.interfaces import fsl
    from nipype.pipeline import engine as pe
    from nipype.interfaces import utility as util
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # op-string builders reused from nipype's canonical FEAT workflow
    getthreshop = preprocess.getthreshop
    getmeanscale = preprocess.getmeanscale
    highpass_workflow = pe.Workflow(name = workflow_name)
    inputnode = pe.Node(interface = util.IdentityInterface(
                    fields = ['ICAed_file',]),
                    name = 'inputspec')
    outputnode = pe.Node(interface = util.IdentityInterface(
                    fields = ['filtered_file']),
                    name = 'outputspec')
    # cast input volumes to float before any arithmetic
    img2float = pe.MapNode(interface = fsl.ImageMaths(out_data_type = 'float',
                                                      op_string = '',
                                                      suffix = '_dtype'),
                           iterfield = ['in_file'],
                           name = 'img2float')
    highpass_workflow.connect(inputnode,'ICAed_file',
                              img2float,'in_file')
    # robust intensity range (2nd / 98th percentiles) used to derive the threshold
    getthreshold = pe.MapNode(interface = fsl.ImageStats(op_string = '-p 2 -p 98'),
                              iterfield = ['in_file'],
                              name = 'getthreshold')
    highpass_workflow.connect(img2float, 'out_file',
                              getthreshold, 'in_file')
    # binary mask from the thresholded temporal-min image
    thresholding = pe.MapNode(interface = fsl.ImageMaths(out_data_type = 'char',
                                                         suffix = '_thresh',
                                                         op_string = '-Tmin -bin'),
                              iterfield = ['in_file','op_string'],
                              name = 'thresholding')
    highpass_workflow.connect(img2float, 'out_file',
                              thresholding, 'in_file')
    # getthreshop turns the percentile stats into the -thr op string
    highpass_workflow.connect(getthreshold,('out_stat',getthreshop),
                              thresholding,'op_string')
    # dilate the mask so edge voxels are not clipped
    dilatemask = pe.MapNode(interface = fsl.ImageMaths(suffix = '_dil',
                                                       op_string = '-dilF'),
                            iterfield = ['in_file'],
                            name = 'dilatemask')
    highpass_workflow.connect(thresholding,'out_file',
                              dilatemask,'in_file')
    maskfunc = pe.MapNode(interface = fsl.ImageMaths(suffix = '_mask',
                                                     op_string = '-mas'),
                          iterfield = ['in_file','in_file2'],
                          name = 'apply_dilatemask')
    highpass_workflow.connect(img2float, 'out_file',
                              maskfunc, 'in_file')
    highpass_workflow.connect(dilatemask, 'out_file',
                              maskfunc, 'in_file2')
    # median intensity inside the (undilated) mask drives grand-mean scaling
    medianval = pe.MapNode(interface = fsl.ImageStats(op_string = '-k %s -p 50'),
                           iterfield = ['in_file','mask_file'],
                           name = 'cal_intensity_scale_factor')
    highpass_workflow.connect(img2float, 'out_file',
                              medianval, 'in_file')
    highpass_workflow.connect(thresholding, 'out_file',
                              medianval, 'mask_file')
    meanscale = pe.MapNode(interface = fsl.ImageMaths(suffix = '_intnorm'),
                           iterfield = ['in_file','op_string'],
                           name = 'meanscale')
    highpass_workflow.connect(maskfunc, 'out_file',
                              meanscale, 'in_file')
    # getmeanscale converts the median stat into the -mul scaling op string
    highpass_workflow.connect(medianval, ('out_stat',getmeanscale),
                              meanscale, 'op_string')
    # temporal mean is kept aside so it can be restored after filtering
    meanfunc = pe.MapNode(interface = fsl.ImageMaths(suffix = '_mean',
                                                     op_string = '-Tmean'),
                          iterfield = ['in_file'],
                          name = 'meanfunc')
    highpass_workflow.connect(meanscale, 'out_file',
                              meanfunc, 'in_file')
    # high-pass only: '-bptf <hp_sigma_vols> -1' disables the low-pass stage
    hpf = pe.MapNode(interface = fsl.ImageMaths(suffix = '_tempfilt',
                                                op_string = '-bptf %.10f -1' % (HP_freq/2/TR)),
                     iterfield = ['in_file'],
                     name = 'highpass_filering')
    highpass_workflow.connect(meanscale,'out_file',
                              hpf, 'in_file',)
    # -bptf removes the temporal mean; add it back
    addMean = pe.MapNode(interface = fsl.BinaryMaths(operation = 'add'),
                         iterfield = ['in_file','operand_file'],
                         name = 'addmean')
    highpass_workflow.connect(hpf, 'out_file',
                              addMean, 'in_file')
    highpass_workflow.connect(meanfunc, 'out_file',
                              addMean, 'operand_file')
    highpass_workflow.connect(addMean, 'out_file',
                              outputnode,'filtered_file')
    return highpass_workflow
def load_csv(f, print_=False):
    """Read a behavioral CSV and tag its rows with session/run identifiers.

    The last two integers appearing in the file path are taken to be the
    session and run numbers. A unique trial ``id`` column is derived as
    ``session * 1000 + run * 100 + trials``.
    """
    numbers = re.findall(r'\d+', f)
    session_number = int(numbers[-2])
    run_number = int(numbers[-1])
    if print_:
        print(session_number, run_number)
    frame = pd.read_csv(f)
    frame['session'] = session_number
    frame['run'] = run_number
    frame['id'] = frame['session'] * 1000 + frame['run'] * 100 + frame['trials']
    return frame
def build_model_dictionary(print_train = False,
                           class_weight = 'balanced',
                           remove_invariant = True,
                           n_jobs = 1):
    """Build the 20 decoding pipelines: 4 feature selectors x 5 classifiers.

    Keys are ``'<Selector> + <Estimator>'`` with selectors
    None / PCA / Mutual / RandomForest and estimators
    Dummy / Linear-SVM / Ensemble-SVMs / KNN / Tree. Every pipeline starts
    with an optional shared VarianceThreshold followed by a fresh
    MinMaxScaler.

    Fix: the two near-identical 20-entry tables (with and without the
    VarianceThreshold step) are collapsed into a single construction loop.

    Parameters
    ----------
    print_train : kept for interface compatibility (currently unused)
    class_weight : passed to the SVM and the decision tree
    remove_invariant : prepend a VarianceThreshold step to every pipeline
    n_jobs : parallelism for the XGBoost feature ranker

    Returns
    -------
    OrderedDict mapping model name -> sklearn Pipeline
    """
    np.random.seed(12345)
    svm = LinearSVC(penalty = 'l2', # default
                    dual = True, # default
                    tol = 1e-3, # not default
                    random_state = 12345, # not default
                    max_iter = int(1e3), # default
                    class_weight = class_weight, # not default
                    )
    # calibrate the SVM decision function into probability estimates
    svm = CalibratedClassifierCV(base_estimator = svm,
                                 method = 'sigmoid',
                                 cv = 8)
    xgb = XGBClassifier(
                        learning_rate = 1e-3, # not default
                        max_depth = 10, # not default
                        n_estimators = 100, # not default
                        objective = 'binary:logistic', # default
                        booster = 'gbtree', # default
                        subsample = 0.9, # not default
                        colsample_bytree = 0.9, # not default
                        reg_alpha = 0, # default
                        reg_lambda = 1, # default
                        random_state = 12345, # not default
                        importance_type = 'gain', # default
                        n_jobs = n_jobs, # default to be 1
                        )
    bagging = BaggingClassifier(base_estimator = svm,
                                n_estimators = 30, # not default
                                max_features = 0.9, # not default
                                max_samples = 0.9, # not default
                                bootstrap = True, # default
                                bootstrap_features = True, # default
                                random_state = 12345, # not default
                                )
    RF = SelectFromModel(xgb,
                         prefit = False,
                         threshold = 'median' # induce sparsity
                         )
    uni = SelectPercentile(mutual_info_classif, 50) # so annoying that I cannot control the random state
    knn = KNeighborsClassifier()
    tree = DecisionTreeClassifier(random_state = 12345,
                                  class_weight = class_weight)
    dummy = DummyClassifier(strategy = 'uniform', random_state = 12345,)
    # one shared VarianceThreshold instance across all pipelines, as in the
    # original hand-written tables
    prefix = [VarianceThreshold()] if remove_invariant else []
    # PCA must be a *fresh* instance per pipeline (the original constructed a
    # new PCA() in each entry), hence the factory callables; uni and RF were
    # deliberately single shared instances and remain so.
    selector_factories = OrderedDict([
        ['None', lambda: []],
        ['PCA', lambda: [PCA()]],
        ['Mutual', lambda: [uni]],
        ['RandomForest', lambda: [RF]],
    ])
    estimators = OrderedDict([
        ['Dummy', dummy],
        ['Linear-SVM', svm],
        ['Ensemble-SVMs', bagging],
        ['KNN', knn],
        ['Tree', tree],
    ])
    models = OrderedDict()
    for selector_name, make_selector in selector_factories.items():
        for estimator_name, estimator in estimators.items():
            steps = prefix + [MinMaxScaler()] + make_selector() + [estimator]
            models['{} + {}'.format(selector_name, estimator_name)] = make_pipeline(*steps)
    return models
def get_blocks(df__, label_map,):
    """Group trial rows into one block per unique ``id``.

    Each block is a 5-row array stacking, for the rows sharing that id:
    ids, session numbers, label strings, encoded targets (last element of
    the ``label_map`` entry) and the original row positions. Also returns
    the per-block unique encoded target, flattened.
    """
    ids = df__['id'].values
    sessions = df__['session'].values
    words = df__['labels'].values
    encoded = np.array([label_map[item] for item in df__['targets'].values])[:, -1]
    row_positions = np.arange(len(encoded))
    blocks = []
    for unique_id in np.unique(ids):
        mask = ids == unique_id
        blocks.append(np.array([ids[mask],
                                sessions[mask],
                                words[mask],
                                encoded[mask],
                                row_positions[mask]]))
    block_labels = np.array([np.unique(block[-2]) for block in blocks]).ravel()
    return blocks, block_labels
def make_unique_class_target(df_data):
    """Map each value of ``targets`` to the label names grouped under it.

    For every distinct ``labels`` value, its first associated target gets
    that label appended to its list.
    """
    class_map = {target: [] for target in pd.unique(df_data['targets'])}
    for _, label_rows in df_data.groupby(['labels']):
        first_target = pd.unique(label_rows['targets'])[0]
        first_label = pd.unique(label_rows['labels'])[0]
        class_map[first_target].append(first_label)
    return class_map
def Find_Optimal_Cutoff(target, predicted):
    """Find the optimal probability cutoff point for a classification model.

    Chooses the ROC threshold whose statistic ``tpr - (1 - fpr)`` is closest
    to zero (Youden-style balance of sensitivity and specificity).

    Parameters
    ----------
    target : true binary outcomes, one row per observation
    predicted : predicted scores/probabilities, one row per observation

    Returns
    -------
    single-element list holding the optimal cutoff value
    """
    fpr, tpr, threshold = roc_curve(target, predicted)
    positions = np.arange(len(tpr))
    table = pd.DataFrame({'tf': pd.Series(tpr - (1 - fpr), index=positions),
                          'threshold': pd.Series(threshold, index=positions)})
    closest = table.iloc[table['tf'].abs().argsort()[:1]]
    return list(closest['threshold'])
def customized_partition(df_data,groupby_column = ['id','labels'],n_splits = 100,):
    """
    modified for unaveraged volumes

    Draw ``n_splits`` random test partitions. Each partition picks, for every
    unique value of ``groupby_column[-1]``, one randomly chosen block of row
    indices, and the selected blocks are concatenated per partition.

    Fix: removed an unreachable ``break`` that followed the ``return``.
    NOTE(review): the original duplicate-partition check computed ``temp``
    but appended the draw in *both* branches, so repeated test sets were
    never actually rejected; that (likely unintended) behavior is preserved
    here and only flagged.
    """
    idx_object = dict(ids = [], idx = [], labels = [])
    for label, df_sub in df_data.groupby(groupby_column):
        idx_object['ids'].append(label[0])
        idx_object['idx'].append(df_sub.index.tolist())
        idx_object['labels'].append(label[-1])
    df_object = pd.DataFrame(idx_object)
    idxs_test = []
    for counter in range(int(1e4)):
        # one randomly chosen index block per unique label
        idx_test = [np.random.choice(item['idx'].values) for ii, item in df_object.groupby(groupby_column[-1])]
        if counter >= n_splits:
            return [np.concatenate(item) for item in idxs_test]
        if counter > 0:
            # effectively-dead duplicate check (see docstring); kept so the
            # original intent remains visible
            temp = []
            for used in idxs_test:
                used_temp = [','.join(str(ii) for ii in item) for item in used]
                idx_test_temp = [','.join(str(ii) for ii in item) for item in idx_test]
                a = set(used_temp)
                b = set(idx_test_temp)
                temp.append(len(a.intersection(b)) != len(idx_test))
        idxs_test.append(idx_test)
def check_train_test_splits(idxs_test):
"""
check if we get repeated test sets
"""
temp = []
for ii,item1 in enumerate(idxs_test):
for jj,item2 in enumerate(idxs_test):
if not ii == jj:
if len(item1) == len(item2):
sample1 = np.sort(item1)
sample2 = | np.sort(item2) | numpy.sort |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import sys
sys.path.append('../')
from defragTrees import *
import BATree
from RForest import RForest
import numpy as np
import re
from sklearn import tree
from sklearn.grid_search import GridSearchCV
#************************
# inTree Class
#************************
class inTreeModel(RuleModel):
    """RuleModel populated by parsing the text output of the R ``inTrees`` package."""
    def __init__(self, modeltype='regression'):
        super().__init__(modeltype=modeltype)

    #************************
    # Fit and Related Methods
    #************************
    def fit(self, X, y, filename, featurename=None):
        """Load rules from an inTrees output file.

        Parameters
        ----------
        X : (n_samples, n_features) array; only used for dimensionality
        y : targets; used for the default prediction and, for regression,
            to map the L1/L2/L3 level labels onto percentiles of y
        filename : path to the inTrees text output
        featurename : optional list of feature names
        """
        # fix: avoid a mutable default argument; [] is the effective default
        if featurename is None:
            featurename = []
        self.dim_ = X.shape[1]
        self.setfeaturename(featurename)
        self.setdefaultpred(y)
        if self.modeltype_ == 'regression':
            # inTrees discretizes regression targets into three levels;
            # map them back to the 17th/50th/83rd percentiles of y
            v1 = np.percentile(y, 17)
            v2 = np.percentile(y, 50)
            v3 = np.percentile(y, 83)
            val = (v1, v2, v3)
        mdl = self.__parsInTreesFile(filename)
        for m in mdl:
            if m[3] == 'X[,1]==X[,1]':
                # tautological condition == the default (empty) rule
                self.rule_.append([])
            else:
                subrule = []
                ll = m[3].split(' & ')
                for r in ll:
                    # each condition looks like "X[,<idx>]<=<t>" or "X[,<idx>]><t>"
                    id1 = r.find(',') + 1
                    id2 = r.find(']')
                    idx = int(r[id1:id2])
                    if '>' in r:
                        v = 1
                        id1 = r.find('>') + 1
                        t = float(r[id1:])
                    else:
                        v = 0
                        id1 = r.find('<=') + 2
                        t = float(r[id1:])
                    subrule.append((idx, v, t))
                self.rule_.append(subrule)
            if self.modeltype_ == 'classification':
                self.pred_.append(int(m[4]))
            elif self.modeltype_ == 'regression':
                if m[4] == 'L1':
                    self.pred_.append(val[0])
                elif m[4] == 'L2':
                    self.pred_.append(val[1])
                elif m[4] == 'L3':
                    self.pred_.append(val[2])
        # earlier rules get larger weights (rule priority order)
        self.weight_ = np.arange(len(self.rule_))[::-1].tolist()

    def __parsInTreesFile(self, filename):
        """Collect the quoted fields of each rule line, grouped by rule index."""
        mdl = []
        # fix: context manager guarantees the file handle is closed on all paths
        with open(filename) as f:
            for line in f:
                if '[' not in line:
                    continue
                id1 = line.find('[') + 1
                id2 = line.find(',')
                idx = int(line[id1:id2])
                if idx > len(mdl):
                    mdl.append(re.findall(r'"([^"]*)"', line))
                else:
                    mdl[idx-1] += re.findall(r'"([^"]*)"', line)
        return mdl
#************************
# NHarvest Class
#************************
class NHarvestModel(RuleModel):
    """RuleModel populated from the text dump of the R ``nodeHarvest`` package."""
    def __init__(self, modeltype='regression'):
        super().__init__(modeltype=modeltype)

    #************************
    # Fit and Related Methods
    #************************
    def fit(self, X, y, filename, featurename=None):
        """Load rules, predictions and weights from a nodeHarvest dump.

        Parameters
        ----------
        X : (n_samples, n_features) array; only used for dimensionality
        y : unused except through the file's own predictions
        filename : path to the nodeHarvest text dump
        featurename : optional list of feature names
        """
        # fix: avoid a mutable default argument; [] is the effective default
        if featurename is None:
            featurename = []
        self.dim_ = X.shape[1]
        self.setfeaturename(featurename)
        rule, pred, weight = self.__parsNHarvestFile(filename)
        # the last parsed entry is the root node; its prediction is the default
        self.setdefaultpred(pred[-1])
        # order rules by decreasing weight, excluding the root entry
        idx = np.argsort(weight[:-1])[::-1]
        self.rule_ = [rule[i] for i in idx]
        if self.modeltype_ == 'regression':
            self.pred_ = [pred[i] for i in idx]
        elif self.modeltype_ == 'classification':
            # nodeHarvest emits probabilities; binarize at 0.5
            self.pred_ = (np.array([pred[i] for i in idx]) > 0.5).astype(int).tolist()
        self.weight_ = [weight[i] for i in idx]

    def __parsNHarvestFile(self, filename):
        """Parse rule boxes, predictions and weights from the dump file.

        The format is line-oriented R output: per node, a block of
        (feature, lower, upper) box bounds, then attribute lines for
        'predict' and the node weight.
        """
        rule = []
        pred = []
        weight = []
        # fix: context manager closes the handle even if the fragile
        # line-oriented parsing below raises
        with open(filename) as f:
            line = f.readline()
            while line:
                f.readline()
                subrule = []
                line = f.readline()
                # box bounds until the first attribute ('a...') line
                while (line[0] != 'a'):
                    s = line.split()
                    idx = int(s[1])
                    low = float(s[2])
                    up = float(s[3])
                    # only finite bounds become conditions (1 = lower, 0 = upper)
                    if not np.isinf(low):
                        subrule.append((idx, 1, low))
                    if not np.isinf(up):
                        subrule.append((idx, 0, up))
                    line = f.readline()
                if (len(subrule) > 0):
                    rule.append(subrule)
                # skip forward to the 'predict' attribute
                while True:
                    line = f.readline()
                    if (line[0] == 'a'):
                        s = line.split('"')
                        if (s[1] == 'predict'):
                            break
                line = f.readline()
                s = line.split()
                pred.append(float(s[1]))
                f.readline()
                line = f.readline()
                s = line.split()
                weight.append(float(s[1]))
                line = f.readline()
                line = f.readline()
                line = f.readline()
                line = f.readline()
                # '[[' opens the next node block; anything else ends the file
                if not line[:2] == '[[':
                    break
        return rule, pred, weight
#************************
# DTree Class
#************************
class DTreeModel(RuleModel):
    """RuleModel distilled from a single grid-search-tuned sklearn decision tree."""
    def __init__(self, modeltype='regression', max_depth=[None, 2, 4, 6, 8], min_samples_leaf=[5, 10, 20, 30], cv=5):
        super().__init__(modeltype=modeltype)
        # NOTE(review): list defaults are shared across instances; safe only
        # while they are never mutated (they are only read here).
        self.max_depth_ = max_depth
        self.min_samples_leaf_ = min_samples_leaf
        self.cv_ = cv

    #************************
    # Fit and Related Methods
    #************************
    def fit(self, X, y, featurename=None):
        """Grid-search a decision tree on (X, y) and convert it into rules.

        Parameters
        ----------
        X : (n_samples, n_features) training matrix
        y : target vector
        featurename : optional list of feature names
        """
        # fix: avoid a mutable default argument; [] is the effective default
        if featurename is None:
            featurename = []
        self.dim_ = X.shape[1]
        self.setfeaturename(featurename)
        self.setdefaultpred(y)
        param_grid = {"max_depth": self.max_depth_, "min_samples_leaf": self.min_samples_leaf_}
        if self.modeltype_ == 'regression':
            mdl = tree.DecisionTreeRegressor()
        elif self.modeltype_ == 'classification':
            mdl = tree.DecisionTreeClassifier()
        grid_search = GridSearchCV(mdl, param_grid=param_grid, cv=self.cv_)
        grid_search.fit(X, y)
        mdl = grid_search.best_estimator_
        self.__parseTree(mdl)
        # leaves of a single tree are disjoint, so all rules weigh equally
        self.weight_ = np.ones(len(self.rule_))

    def __parseTree(self, mdl):
        """Turn every leaf of the fitted tree into a (feature, sign, threshold) rule."""
        t = mdl.tree_
        m = len(t.value)
        left = t.children_left
        right = t.children_right
        feature = t.feature
        threshold = t.threshold
        value = t.value
        # reconstruct parent pointers and child types (0 = left/<=, 1 = right/>)
        parent = [-1] * m
        ctype = [-1] * m
        for i in range(m):
            if not left[i] == -1:
                parent[left[i]] = i
                ctype[left[i]] = 0
            if not right[i] == -1:
                parent[right[i]] = i
                ctype[right[i]] = 1
        for i in range(m):
            if not left[i] == -1:
                # internal node, not a leaf
                continue
            # walk from the leaf to the root, collecting split conditions
            subrule = []
            c = ctype[i]
            idx = parent[i]
            while not idx == -1:
                subrule.append((int(feature[idx])+1, c, threshold[idx]))
                c = ctype[idx]
                idx = parent[idx]
            self.rule_.append(subrule)
            if np.array(value[i]).size > 1:
                # classification leaf: predict the majority class
                self.pred_.append(np.argmax(np.array(value[i])))
            else:
                # fix: np.asscalar was removed in NumPy 1.23+; ndarray.item()
                # is the documented equivalent
                self.pred_.append(value[i].item())
#************************
# BTree Class
#************************
class BTreeModel(RuleModel):
    """RuleModel distilled from a born-again tree (BATree) fitted to a random forest."""
    def __init__(self, modeltype='regression', max_depth=[2, 3, 4, 6, 8, 10], min_samples_leaf=[10], cv=5, smear_num=100, njobs=1, seed=0):
        super().__init__(modeltype=modeltype)
        # NOTE(review): list defaults are shared across instances; safe only
        # while they are never mutated (they are only read here).
        self.max_depth_ = max_depth
        self.min_samples_leaf_ = min_samples_leaf
        self.cv_ = cv
        self.smear_num_ = smear_num
        self.njobs_ = njobs
        self.seed_ = seed

    #************************
    # Fit and Related Methods
    #************************
    def fit(self, X, y, dirname, featurename=None):
        """Fit a BATree approximating the forest stored in ``dirname`` and rule-ify it.

        Parameters
        ----------
        X, y : training data for the born-again tree
        dirname : directory containing the dumped random forest
        featurename : optional list of feature names

        Returns
        -------
        the fitted BATree object
        """
        # fix: avoid a mutable default argument; [] is the effective default
        if featurename is None:
            featurename = []
        self.dim_ = X.shape[1]
        self.setfeaturename(featurename)
        self.setdefaultpred(y)
        mdl = RForest(modeltype=self.modeltype_)
        mdl.fit(dirname)
        tree = BATree.fitBATreeCV(X, y, mdl, modeltype=self.modeltype_, max_depth=self.max_depth_, min_samples_split=self.min_samples_leaf_, cv=self.cv_, seed=self.seed_, smear_num=self.smear_num_, njobs=self.njobs_)
        self.__parseTree(tree)
        # leaves of a single tree are disjoint, so all rules weigh equally
        self.weight_ = np.ones(len(self.rule_))
        return tree

    def __parseTree(self, tree):
        """Turn every leaf of the BATree into a (feature, sign, threshold) rule."""
        m = len(tree.pred_)
        left = tree.left_
        right = tree.right_
        feature = tree.index_
        threshold = tree.threshold_
        value = tree.pred_
        # reconstruct parent pointers and child types (0 = left/<=, 1 = right/>)
        parent = [-1] * m
        ctype = [-1] * m
        for i in range(m):
            if not left[i] == -1:
                parent[left[i]] = i
                ctype[left[i]] = 0
            if not right[i] == -1:
                parent[right[i]] = i
                ctype[right[i]] = 1
        for i in range(m):
            if not left[i] == -1:
                # internal node, not a leaf
                continue
            # walk from the leaf to the root, collecting split conditions
            subrule = []
            c = ctype[i]
            idx = parent[i]
            while not idx == -1:
                subrule.append((int(feature[idx])+1, c, threshold[idx]))
                c = ctype[idx]
                idx = parent[idx]
            self.rule_.append(subrule)
            if np.array(value[i]).size > 1:
                # classification leaf: predict the majority class
                self.pred_.append(np.argmax(np.array(value[i])))
            else:
                # fix: the original final line was corrupted in this copy;
                # reconstructed from the identical DTreeModel.__parseTree and
                # modernized (np.asscalar was removed in NumPy 1.23+)
                self.pred_.append(value[i].item())
from __future__ import print_function
import numpy as np
import multiprocessing as mp
import time
from scipy.integrate import simps
from functools import partial
from scdn.validation_truncation_1 import cross_validation
from scdn.model_config import Modelconfig, Modelpara
import os
from six.moves import cPickle as pkl
import random
import glob
import six
def error_ws_0(y, gamma_ini, lam_1, P12, Omega):
    """Penalized squared error of the warm-start reconstruction.

    Returns ``||y - gamma_ini @ P12.T||_F^2`` plus ``lam_1`` times the sum,
    over rows i, of the roughness penalty ``gamma_i @ Omega @ gamma_i``.
    """
    residual = y - np.dot(gamma_ini, np.transpose(P12))
    fit_term = np.sum(residual ** 2)
    roughness = 0
    for row in range(y.shape[0]):
        roughness = roughness + np.dot(np.dot(gamma_ini[row, :], Omega), gamma_ini[row, :])
    return fit_term + lam_1 * roughness
def error_ws(y, gamma_ini, lam_1, P12, Omega):
    """Minimize the warm-start objective by gradient descent with backtracking.

    Runs at most 999 gradient steps. Each step shrinks the step size by 0.8
    (Armijo-style, slope constant 0.5) until sufficient decrease of
    ``error_ws_0`` is achieved, and iteration stops early once the gradient
    norm drops below 1e-3. Returns the final coefficient matrix.
    """
    for _ in range(1, 1000):
        grad = np.dot((np.dot(gamma_ini, np.transpose(P12)) - y), P12) * 2 \
               + 2 * lam_1 * np.dot(gamma_ini, np.transpose(Omega))
        grad_sq = np.sum(grad ** 2)
        step = 1
        current = error_ws_0(y, gamma_ini, lam_1, P12, Omega)
        while error_ws_0(y, gamma_ini - step * grad, lam_1, P12, Omega) > current - 0.5 * step * grad_sq:
            step = 0.8 * step
        gamma_ini = gamma_ini - grad * step
        if grad_sq ** 0.5 < 0.001:
            break
    return gamma_ini
def update_p(file_name_dir, precomp_dir, pickle_file, tol, max_iter, multi, init, saved, lamu):
"""
main algorithm, updating parameter for a defined problem
Parameters
-----------
file_name_dir: dir of problem folder
precomp_dir: dir of precomputed data
pickle_file: file name which we use to save estimations
lamu: list = [lam, mu, mu_1, mu_2, lam_1], in our paper, lam*mu, lam*mu_1*mu, lam*mu_2*mu is the coefficient
for l1 norm penalty of A, B, C. lam_1 is the penalty for the second dirivative of estimated neural activities.
tol, max_iter:
multi: boolean variable, Default True
init: boolean variable, whether to use two-step method
saved: boolean variable, whether the initial value for two-step method has been saved
"""
configpara = Modelpara(precomp_dir+'precomp.pkl')
config = Modelconfig(file_name_dir+'data/observed.pkl')
if init:
init_dir = precomp_dir[:-5] + 'init/results/result.pkl'
if saved:
B_u = True
else:
B_u = False
config.B_u = B_u
P1 = configpara.P1
P2 = configpara.P2
P3 = configpara.P3
P4 = configpara.P4
P5 = configpara.P5
P6 = configpara.P6
P7 = configpara.P7
P8 = configpara.P8
P9 = configpara.P9
P10 = configpara.P10
P11 = configpara.P11
P12 = configpara.P12
P13 = configpara.P13
P14 = configpara.P14
P15 = configpara.P15
Q1 = configpara.Q1
Q2 = configpara.Q2
Q3 = configpara.Q3
Q4 = configpara.Q4
Omega = configpara.Omega
y = config.y
n_area = config.n_area
p = configpara.p
t_i = configpara.t_i
l_t = configpara.l_t
J = configpara.J
t_T = configpara.t_T
###################################################################################
def gr(gamma, A, B, C, D, lam, mu, mu_1, lam_1):
g = np.zeros((n_area,p))
g = g + np.dot(gamma,P1) - np.dot(np.dot(np.transpose(A),gamma),np.transpose(P2))
g = g - np.dot(np.dot(A,gamma),P2) + np.dot(np.dot(np.dot(np.transpose(A),A),gamma),P5)
tmp_1 = 0
tmp_2 = 0
for j in range(J):
tmp_1 = tmp_1+np.dot(np.dot(B[:,:,j],gamma),P3[:,:,j])
tmp_2 = tmp_2+np.dot(np.dot(np.dot(np.transpose(A),B[:,:,j]),gamma),P6[:,:,j])
g = g-(tmp_1-tmp_2)
g = g-np.dot(C,P4)+np.dot(np.dot(np.transpose(A),C),P7)
g = g-np.dot(D,P8)+np.dot(np.dot(np.transpose(A),D),P9)
tmp = 0
for l in range(J):
tmp_1 = 0
for j in range(J):
tmp_1 = np.dot(np.dot(B[:,:,j],gamma),P10[:,:,j,l])
tmp = tmp-np.dot(np.transpose(B[:,:,l]),(np.dot(gamma,np.transpose(P3[:,:,l])) - np.dot(np.dot(A,gamma),np.transpose(P6[:,:,l]))-tmp_1-np.dot(C,P13[:,:,l])-np.dot(D,P11[l,:].reshape((1,-1)))))
g = g+tmp
g = g*2*lam
tmp1 = np.zeros((n_area,1))
tmp2 = np.zeros((n_area,J))
for m in range(n_area):
tmp1[m,0] = np.sum(abs(A[:,m]))/np.dot(np.dot(gamma[m,:],P5),gamma[m,])**0.5
for j in range(J):
tmp2[m,j] = np.sum(abs(B[:,m,j]))/np.dot(np.dot(gamma[m,:],P10[:,:,j,j]),gamma[m,:])**0.5
g = g + lam*mu*np.dot(gamma,np.transpose(P5))*tmp1
for j in range(J):
g = g + lam*mu_1*np.dot(gamma,P10[:,:,j,j])*(tmp2[:,j].reshape((-1,1)))
g = g + np.dot((np.dot(gamma,np.transpose(P12))-y),P12)*2
g = g + 2*lam_1*np.dot(gamma,np.transpose(Omega))
g[np.isnan(g)]=0
return g
def cd_thre(tmp, tmp_1, mu):
mu = mu/2.0
return np.maximum((abs(tmp)-mu*(tmp_1**0.5))/tmp_1,0)*np.sign(tmp)
def update_A(n, gamma, A, B, C, D, mu):
tmp_0 = 0
for j in range(J):
tmp_0 = tmp_0 + np.dot(np.dot(np.dot(B[:,:,j],gamma),P6[:,:,j]),gamma[n,:])
tmp_1 = np.dot(np.dot(gamma[n,:],P5),gamma[n,:])
tmp = np.dot(gamma,np.dot(gamma[n,:],P2))-np.dot(np.dot(np.dot(A,gamma),P5),gamma[n,:])-tmp_0-np.dot(np.dot(C,P7),gamma[n,:])-D[:,0]*np.dot(gamma[n,:],P9[0,:])+A[:,n]*tmp_1
return cd_thre(tmp,tmp_1,mu)
def update_B(n,j,gamma,A,B,C,D,mu):
tmp_0 = 0
for l in range(J):
tmp_0 = tmp_0 + np.dot(np.dot(np.dot(B[:,:,l],gamma),P10[:,:,l,j]),gamma[n,:])
tmp_1 = np.dot(np.dot(gamma[n,:],P10[:,:,j,j]),gamma[n,:])
tmp = np.dot(gamma,np.dot(gamma[n,:],P3[:,:,j]))-np.dot(np.dot(np.dot(A,gamma),np.transpose(P6[:,:,j])),gamma[n,:])-tmp_0-np.dot(np.dot(C,P13[:,:,j]),gamma[n,:])-D[:,0]*np.dot(gamma[n,:],P11[j,:])+B[:,n,j]*tmp_1
return cd_thre(tmp,tmp_1,mu)
def update_C(n,gamma,A,B,C,D,mu):
tmp_0 = 0
for j in range(J):
tmp_0 = tmp_0+np.dot(np.dot(B[:,:,j],gamma),P13[n,:,j])
tmp_1 = P14[n,n]
tmp = np.dot(gamma,P4[n,:])-np.dot(np.dot(A,gamma),P7[n,:])-tmp_0-np.dot(C,P14[n,:])-D[:,0]*P15[0,n]+C[:,n]*tmp_1
return cd_thre(tmp,tmp_1,mu)
def update_D(gamma,A,B,C):
tmp = np.dot(gamma,np.transpose(P8))-np.dot(np.dot(A,gamma),np.transpose(P9))
for j in range(J):
tmp = tmp-np.dot(np.dot(B[:,:,j],gamma),P11[j,:]).reshape((-1,1))
tmp = tmp - np.dot(C,np.transpose(P15))
return tmp*1.0/t_T
def likelihood(gamma, A, B, C, D, lam, mu, mu_1, mu_2, lam_1, p_t=False):
e1 = np.sum((y-np.dot(gamma,np.transpose(P12)))**2)
e2 = 0
tmp_0=0
for j in range(J):
tmp_0 = tmp_0 + np.dot(np.dot(B[:,:,j],gamma),Q3[:,:,j])
tmp = np.dot(gamma,Q1)-np.dot(np.dot(A,gamma),Q2)-tmp_0- | np.dot(C,Q4) | numpy.dot |
"""
Example of how a multivariate linear regression problem can be solved with the
package.
"""
import numpy as np
from gustavgrad import Tensor
# 100 training examples with 3 features
x = Tensor(np.random.rand(100, 3))
# The function we want to learn
coefs = Tensor([1.0, 3.0, 5.0])
bias = 2
y = x @ coefs + bias
# Our model
w = Tensor(np.random.randn(3), requires_grad=True)
b = Tensor(np.random.rand(), requires_grad=True)
# Train the model
lr = 0.001
batch_size = 25
for _ in range(1000):
# Train in batches
idx = | np.arange(x.shape[0]) | numpy.arange |
import numpy as np
import torch
import torch.nn.functional as F
import torchvision
import PIL
import itertools
import datetime
import random
import skimage
from skimage import filters
def noise_permute(datapoint):
    """Spatially shuffle the pixels of a labelled image tensor.

    Takes a ``(C, H, W)`` torch tensor (C <= 4) paired with a label. Whole
    pixel vectors (all channels together) are randomly permuted over the
    spatial positions, so per-channel statistics are preserved. Returns a
    torch tensor of the same shape and the unchanged label.
    """
    img, label = datapoint
    hwc = np.transpose(img.numpy(), (1, 2, 0))
    assert len(hwc.shape) == 3 and hwc.shape[2] <= 4, 'Unexpected image dimensions.'
    height, width, channels = hwc.shape
    pixels = hwc.reshape(height * width, channels)
    # np.random.permutation shuffles along the first axis, i.e. whole pixels
    shuffled = np.random.permutation(pixels).reshape(hwc.shape)
    return torch.from_numpy(np.transpose(shuffled, (2, 0, 1))), label
def filter_gauss(datapoint, srange=[1,1]):
    """Blur a labelled image tensor with a Gaussian of random sigma.

    Sigma is drawn uniformly from ``[srange[0], srange[1]]``. The ``srange``
    default is a shared mutable list but is never mutated here.
    NOTE(review): ``multichannel=3`` is a deprecated skimage argument
    (removed in skimage 1.0 in favor of ``channel_axis``) — confirm the
    pinned skimage version.
    """
    img, label = datapoint
    hwc = np.transpose(img.numpy(), (1, 2, 0))
    low, high = srange[0], srange[1]
    sigma = low + np.random.random_sample() * (high - low)
    blurred = skimage.filters.gaussian(hwc, sigma=sigma, multichannel=3)
    return torch.from_numpy(np.transpose(blurred, (2, 0, 1))), label
def gaussed_noise_perm(x):
    """Pixel-permute a labelled image, then apply a light Gaussian blur."""
    return filter_gauss(noise_permute(x), srange=[0.25, 1.25])
def scale_full_range(datapoint):
    """Linearly rescale an image so its values span exactly [0, 1]; keep the label."""
    img = datapoint[0]
    shifted = img - img.min()
    return shifted / shifted.max(), datapoint[1]
def noise_uniform(datapoint):
    """Replace a labelled image with uniform noise of the same shape.

    Accepts a 3-D ``(C, H, W)`` torch tensor with 1 or 3 channels and
    returns a float32 torch tensor of identical shape filled with U[0, 1)
    samples, plus the unchanged label. Any other channel count trips an
    assertion.
    """
    img, label = datapoint
    assert len(img.shape) == 3, 'Unexpected image dimensions:' + str(img.shape)
    hwc = np.transpose(img.numpy(), (1, 2, 0))
    if hwc.shape[2] == 1:
        noise = np.float32(np.random.uniform(size=hwc.shape))
        result = torch.from_numpy(np.transpose(noise, (2, 0, 1)))
        assert result.shape == img.shape, 'torch.from_numpy(np.transpose(imgn_random, (2,0,1))).shape wrong: ' + str(result.shape)
        return result, label
    assert hwc.shape[2] == 3, 'Unexpected last image dimensions:' + str(hwc.shape)
    noise = np.float32(np.random.uniform(size=hwc.shape))
    return torch.from_numpy(np.transpose(noise, (2, 0, 1))), label
def noise_low_freq(datapoint):
    """Low-frequency noise: uniform noise, Gaussian-blurred, rescaled to [0, 1]."""
    return scale_full_range(filter_gauss(noise_uniform(datapoint), srange=[1, 2.5]))
def identity(datapoint):
    """Return the (image, label) pair unchanged; placeholder transform."""
    return datapoint
class monochrome:
def __init__(self, color):
super().__init__()
self.color = color
def __call__(self, datapoint):
img, label = datapoint
assert len(img.shape) == 3, 'Unexpected image dimensions:' + str(img.shape)
imgn = np.transpose(img.numpy(), (1,2,0))
imgn_monochrome = np.float32(self.color* | np.ones(imgn.shape) | numpy.ones |
##########################################################
# lane_detector.py
#
# SPDX-FileCopyrightText: Copyright 2021 <NAME>
#
# SPDX-License-Identifier: MIT
#
# Lane detection techniques
#
# ########################################################
#
# Import libraries
import cv2
import numpy as np
import math
from collections import deque
class LaneDetector:
    def __init__(self, is_video=False, width=1280, height=720, draw_area = True, queue_len=10):
        """Configure the lane detector.

        Parameters
        ----------
        is_video : whether frames come from a video stream
        width, height : frame dimensions in pixels
        draw_area : whether to draw the filled lane-area overlay
        queue_len : number of past fits/radii kept for weighted averaging
        """
        # Roi
        self.vertices = None
        # Video pipline
        self.is_video = is_video
        # Frame dimension
        self.width = width
        self.height = height
        # Draw
        self.draw_area_err = True
        self.draw_area = draw_area
        self.road_color = (204, 255, 153)
        self.l_lane_color = (0, 0, 255)
        self.r_lane_color = (255, 0, 0)
        self.lane_thickness = 30
        # Lane search
        self.n_windows = 9          # stacked sliding windows per frame
        self.margin = 100           # half-width of each sliding window (px)
        self.nb_margin = 100        # half-width for the nearby search (px)
        self.px_threshold = 50      # min pixels needed to recenter a window
        self.radii_threshold = 10   # allowed relative deviation of new radii
        self.min_lane_dis = 600
        # Current lanes and radii
        self.l_curr_fit = None
        self.r_curr_fit = None
        self.l_diff_fit = 0
        self.r_diff_fit = 0
        self.l_curr_cr = 0
        self.r_curr_cr = 0
        self.lost_track = 0         # consecutive frames with lost lane fits
        self.lost_radii = 0         # consecutive frames with implausible radii
        self.poly_thr_a = 0.001
        self.poly_thr_b = 0.4
        self.poly_thr_c = 150
        # Convert px to meter
        self.px_to_m_y = 30/720 # meters per pixel in y dimension
        self.px_to_m_x = 3.7/700 # meters per pixel in x dimension
        # Averaging
        self.queue_len = queue_len
        self.l_fit_que = deque(maxlen=self.queue_len)
        self.r_fit_que = deque(maxlen=self.queue_len)
        self.l_rad_que = deque(maxlen=self.queue_len)
        self.r_rad_que = deque(maxlen=self.queue_len)
        # linearly increasing weights: most recent frames count most
        self.weights = np.arange(1, self.queue_len + 1) / self.queue_len
        # No Text on frame
        self.no_text = False
""" General methods for setting files and getting information """
    def set_vertices(self, vertices):
        """Store the region-of-interest polygon vertices used for lane search."""
        self.vertices = vertices
    def reset_detector(self):
        """Discard all cached state (ROI, fits, radii, miss counters)."""
        self.empty_queue()
        self.vertices = None
        self.l_curr_fit = None
        self.r_curr_fit = None
        self.l_diff_fit = 0
        self.r_diff_fit = 0
        self.l_curr_cr = 0
        self.r_curr_cr = 0
        self.lost_track = 0
        self.lost_radii = 0
    def empty_queue(self):
        """Replace the fit/radius history deques with empty ones of the configured length."""
        self.l_fit_que = deque(maxlen=self.queue_len)
        self.r_fit_que = deque(maxlen=self.queue_len)
        self.l_rad_que = deque(maxlen=self.queue_len)
        self.r_rad_que = deque(maxlen=self.queue_len)
""" Find lanes """
def calculate_histogram(self, frame):
return np.sum(frame, axis=0)
def get_hist_peaks(self, histogram):
center = np.int(histogram.shape[0]//2)
left_peak = np.argmax(histogram[:center])
right_peak = np.argmax(histogram[center:]) + center
return left_peak, right_peak
def cr_to_degree(self, cr, arc_length):
dc = (180 * arc_length) / (math.pi * cr)
return dc/2
    def find_lanes(self, frame):
        """Detect both lane lines in a binary warped frame and render them.

        Runs a full sliding-window search when no valid fits are cached,
        otherwise a faster search near the weighted-average previous fits.
        Returns ``(overlay_image, average_radius)``; on a drawing failure the
        overlay is an all-black BGR frame and the radius is None.
        """
        self.check_track()
        if self.l_curr_fit is None or self.r_curr_fit is None:
            # cold start: histogram peaks + sliding-window pixel search
            self.empty_queue()
            histogram = self.calculate_histogram(frame)
            left_peak, right_peak = self.get_hist_peaks(histogram)
            leftx, lefty, rightx, righty = self.sliding_window(frame, left_peak, right_peak)
            left_fit, right_fit = self.fit_polynomial(leftx, lefty, rightx, righty)
            # second fit in metric units, used only for curvature estimation
            left_fit_cr, right_fit_cr = self.fit_polynomial(
                leftx * self.px_to_m_x, lefty * self.px_to_m_y,
                rightx * self.px_to_m_x, righty * self.px_to_m_y)
            # Get radii of lane curvature
            left_rad, right_rad = self.calculate_poly_radii(frame, left_fit_cr, right_fit_cr)
            # NOTE(review): left/right radii appear cross-assigned below
            # (r_curr_cr gets left_rad and vice versa) — confirm intent
            self.r_curr_cr = left_rad
            self.l_curr_cr = right_rad
            self.r_curr_fit = right_fit
            self.l_curr_fit = left_fit
            self.l_fit_que.append(left_fit)
            self.r_fit_que.append(right_fit)
            self.l_rad_que.append(left_rad)
            self.r_rad_que.append(right_rad)
        else:
            # warm start: search within nb_margin of the averaged previous fits
            left_fit, right_fit, left_fit_cr, right_fit_cr, _ = self.nearby_search(
                frame,
                np.average(self.l_fit_que, 0, self.weights[-len(self.l_fit_que):]),
                np.average(self.r_fit_que, 0, self.weights[-len(self.r_fit_que):]))
            self.l_fit_que.append(left_fit)
            self.r_fit_que.append(right_fit)
        # mean of the (recency-weighted) left and right curvature radii
        avg_rad = round(np.mean([np.average(self.r_rad_que, 0, self.weights[-len(self.r_rad_que):]),
                                 np.average(self.l_rad_que, 0, self.weights[-len(self.l_rad_que):])]),0)
        try:
            return (self.draw_lanes(frame,
                                    np.average(self.l_fit_que, 0, self.weights[-len(self.l_fit_que):]),
                                    np.average(self.r_fit_que, 0, self.weights[-len(self.r_fit_que):])),
                    avg_rad)
        except:
            # any drawing failure yields a black frame and no radius
            return (np.zeros_like(cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)), None)
def sliding_window(self, frame, left_peak, right_peak):
# Set window height
window_height = np.int(frame.shape[0]//self.n_windows)
# Find non-zero values
nonzero = frame.nonzero()
nonzero_y = np.array(nonzero[0])
nonzero_x = np.array(nonzero[1])
# Current positions to be updated later for each window in n_windows
leftx_current = left_peak
rightx_current = right_peak
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(self.n_windows):
# Identify window boundaries in x and y (and right and left)
win_y_low = frame.shape[0] - (window + 1) * window_height
win_y_high = frame.shape[0] - window * window_height
# Find the four below boundaries of the window
win_xleft_low = leftx_current - self.margin
win_xleft_high = leftx_current + self.margin
win_xright_low = rightx_current - self.margin
win_xright_high = rightx_current + self.margin
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzero_y >= win_y_low ) & (nonzero_y < win_y_high) &\
(nonzero_x >= win_xleft_low) & (nonzero_x < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzero_y >= win_y_low ) & (nonzero_y < win_y_high) &\
(nonzero_x >= win_xright_low) & (nonzero_x < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > px_threshold pixels, recenter next window
# (`right` or `leftx_current`) on their mean position
if len(good_left_inds) > self.px_threshold:
leftx_current = np.int(np.mean(nonzero_x[good_left_inds]))
if len(good_right_inds) > self.px_threshold:
rightx_current = np.int(np.mean(nonzero_x[good_right_inds]))
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
leftx = nonzero_x[left_lane_inds]
lefty = nonzero_y[left_lane_inds]
rightx = nonzero_x[right_lane_inds]
righty = nonzero_y[right_lane_inds]
return leftx, lefty, rightx, righty
def calculate_poly_radii(self, frame, left_fit, right_fit):
frame_height = np.linspace(0, frame.shape[0] - 1, frame.shape[0])
max_px_window = np.max(frame_height)
try:
left_rad = ((1 + (2 * left_fit[0] * max_px_window * self.px_to_m_y + left_fit[1])**2)**1.5) / np.absolute(2 * left_fit[0])
right_rad = ((1 + (2 * right_fit[0] * max_px_window * self.px_to_m_y + right_fit[1])**2)**1.5) / np.absolute(2 * right_fit[0])
if math.isinf(left_rad) or math.isinf(right_rad):
return self.l_curr_cr, self.r_curr_cr
except:
return self.l_curr_cr, self.r_curr_cr
return int(left_rad), int(right_rad)
def check_radii(self, left_rad, right_rad):
avg_l = np.average(self.l_rad_que, 0, self.weights[-len(self.l_rad_que):])
avg_r = np.average(self.r_rad_que, 0, self.weights[-len(self.r_rad_que):])
abs_l__diff = np.absolute(avg_l - left_rad)
abs_r__diff = np.absolute(avg_r - right_rad)
if abs_l__diff > (avg_l / self.radii_threshold) and self.lost_radii < 5 and abs_r__diff > (avg_r / self.radii_threshold):
self.lost_radii += 1
return False
else:
self.lost_radii = 0
return True
def fit_polynomial(self, leftx, lefty, rightx, righty):
try:
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
except:
# Empty vector
left_fit = self.l_curr_fit
self.draw_area_err = False
try:
right_fit = np.polyfit(righty, rightx, 2)
except:
# Empty vector
right_fit = self.r_curr_fit
self.draw_area_err = False
return left_fit, right_fit
def insert_direction(self, frame, avg_rad):
if not self.no_text:
cv2.putText(frame, 'Curvature radius: {:.2f} m'.format(avg_rad),
(50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
else:
self.no_text = False
def insert_fps(self, frame, fps):
cv2.putText(frame, 'FPS: {}'.format(int(fps)),
(50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2)
def check_track(self):
if self.lost_track > 5:
print('Reset tracks')
self.l_curr_fit = None
self.r_curr_fit = None
self.lost_track = 0
self.no_text = True
def draw_lanes(self, warped_frame, left_fit, right_fit):
# Convert to 3 channels
frame_3channel = cv2.cvtColor(np.zeros_like(warped_frame), cv2.COLOR_GRAY2BGR)
# Generate axis for polynomial
frame_height = np.linspace(0, frame_3channel.shape[0] - 1, frame_3channel.shape[0])
# Frames to save results
lanes = | np.zeros_like(frame_3channel) | numpy.zeros_like |
from __future__ import print_function
import numpy as np
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
from keras.optimizers import Adam
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
# Directory holding the preprocessed LUNA16 .npy arrays used below.
working_path = "/home/qwerty/data/luna16/output/"
K.set_image_dim_ordering('th')  # Theano dimension ordering in this code
# Input slice dimensions (pixels) for the U-Net.
img_rows = 512
img_cols = 512
# Smoothing term for the Dice coefficient; avoids division by zero on
# empty masks and makes the score 1.0 for two empty masks.
smooth = 1.
def dice_coef(y_true, y_pred):
    """Dice similarity coefficient on Keras tensors, smoothed by `smooth`."""
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    denom = K.sum(flat_true) + K.sum(flat_pred) + smooth
    return (2. * overlap + smooth) / denom
def dice_coef_np(y_true, y_pred, smooth=1.):
    """NumPy Dice similarity coefficient between two masks.

    Generalized from the original: `smooth` is now a parameter (default 1.,
    matching the module-level constant) instead of a hard-coded module
    global, and array-likes (e.g. lists) are accepted via np.asarray.

    :param y_true: ground-truth mask (array-like).
    :param y_pred: predicted mask (array-like).
    :param smooth: Laplace smoothing term; keeps the score defined (1.0)
        when both masks are empty.
    :return: float Dice score in (0, 1].
    """
    y_true_f = np.asarray(y_true).flatten()
    y_pred_f = np.asarray(y_pred).flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
    """Negated Dice coefficient, so that minimizing the loss maximizes overlap."""
    score = dice_coef(y_true, y_pred)
    return -score
def get_unet():
    """Build and compile the 2-D U-Net used for nodule segmentation.

    Contracting path of five double-convolution stages (32..512 filters)
    with 2x2 max pooling, then an expanding path with 2x2 upsampling and
    channel-wise skip connections, ending in a 1x1 sigmoid convolution.
    Compiled with Adam (lr=1e-5) and the Dice loss/metric.
    """
    def double_conv(n_filters, x):
        # Two stacked 3x3 ReLU convolutions with 'same' padding.
        x = Convolution2D(n_filters, (3, 3), activation='relu', border_mode='same')(x)
        return Convolution2D(n_filters, (3, 3), activation='relu', border_mode='same')(x)

    inputs = Input((1, img_rows, img_cols))
    # Contracting path.
    conv1 = double_conv(32, inputs)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = double_conv(64, pool1)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = double_conv(128, pool2)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = double_conv(256, pool3)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = double_conv(512, pool4)
    # Expanding path with skip connections (concatenate on channel axis).
    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = double_conv(256, up6)
    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = double_conv(128, up7)
    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = double_conv(64, up8)
    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = double_conv(32, up9)
    conv10 = Convolution2D(1, (1, 1), activation='sigmoid')(conv9)
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=Adam(lr=1.0e-5), loss=dice_coef_loss, metrics=[dice_coef])
    return model
def train_and_predict(use_existing):
print('-'*30)
print('Loading and preprocessing train data...')
print('-'*30)
imgs_train = np.load(working_path+"trainImages.npy").astype(np.float32)
imgs_mask_train = np.load(working_path+"trainMasks.npy").astype(np.float32)
imgs_test = np.load(working_path+"testImages.npy").astype(np.float32)
imgs_mask_test_true = | np.load(working_path+"testMasks.npy") | numpy.load |
import json
import torch
import numpy as np
import argparse
import pickle
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
from scipy.stats import spearmanr, pearsonr
from transformers import BertModel, BertTokenizer, GPT2Tokenizer, GPT2LMHeadModel
# Fix RNG seeds so evaluation runs are reproducible.
torch.manual_seed(2000)
np.random.seed(2000)
def compute_fm_score(x, y):
    """Return the ratio of the larger of *x*, *y* to the smaller (always >= 1).

    Idiom fix: the original built throwaway lists (`max([x, y])`); `max`/`min`
    take multiple arguments directly.
    """
    return max(x, y) / min(x, y)
# Per-dataset metadata: which human-annotation columns exist in the
# evaluation JSON, and how to aggregate the per-annotator score lists
# into a single value per dialog (used by normalize_df).
dataset_meta_info ={
    'fed-dial': {
        'annotations': ['Coherent', 'Error recovery', 'Consistent', 'Diverse', 'Depth', 'Likeable', 'Understanding', 'Flexible', 'Informative', 'Inquisitive', 'Overall'],
        'aggregation':np.mean},
    'persona-see': {
        'annotations': ['enjoy', 'interest', 'listen', 'turing', 'avoid_rep', 'make_sense', 'fluency', 'inquisitive', 'persona_guess'],
        'aggregation':np.mean},
    }
def normalize_df(dataset_name, df, ds_meta):
    """Collapse each per-annotator score list in *df* to a single value.

    For every annotation column of *dataset_name* (per *ds_meta*), applies
    that dataset's aggregation function (e.g. np.mean) element-wise to the
    'annotations.<name>' column. Mutates and returns *df*.
    """
    meta = ds_meta[dataset_name]
    aggregate = meta['aggregation']
    for name in meta['annotations']:
        column = 'annotations.' + name
        df[column] = df[column].apply(aggregate)
    return df
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='persona-see')
parser.add_argument('--device', type=str, default='cuda:0')
parser.add_argument('--am_model_path', type=str, default='embedding_models/persona_am/')
parser.add_argument('--fm_model_path', type=str, default='language_models/persona_fm')
parser.add_argument('--criterion', nargs='+')
args = parser.parse_args()
print(args)
globals().update(args.__dict__)
bert_model = BertModel.from_pretrained(am_model_path).to(device)
bert_tokenizer = BertTokenizer.from_pretrained(am_model_path)
bert_model.eval()
gpt2_tokenizer = GPT2Tokenizer.from_pretrained(fm_model_path)
gpt2_model = GPT2LMHeadModel.from_pretrained(fm_model_path).to(device)
gpt2_model.eval()
with open('../../human_evaluation_data/{}_eval.json'.format(dataset)) as f:
df = pd.json_normalize(json.load(f))
human_scores = {}
annotations = ["annotations." + _ for _ in dataset_meta_info[dataset]["annotations"]]
for k in annotations:
human_scores[k] = list(df[k])
df = normalize_df(dataset, df, dataset_meta_info)
dialog_list = df.dialog.to_list()
full_human_model_pairs = []
for whole_dialog in dialog_list:
human_model_pairs = []
for idx, utt in enumerate(whole_dialog):
if utt['speaker'] == 'model' and idx != 0:
prev_utt = whole_dialog[idx-1]
human_model_pairs.append((prev_utt['text'], utt['text']))
full_human_model_pairs.append(human_model_pairs)
# to handle the missing annotations for error recovery category
new_human_scores = {}
for k, v in human_scores.items():
new_human_scores[k] = []
for item in v:
if 'Error recovery' in k:
if len(item) == 0:
new_human_scores[k].append((False, 0))
else:
new_human_scores[k].append((True, np.mean(item)))
else:
new_human_scores[k].append((True, np.mean(item)))
am_scores_dialog_level = []
with torch.no_grad():
for dialog in tqdm(full_human_model_pairs):
am_scores_turn_level = []
for prev, cur in dialog:
prev_inputs = {k:v.to(device) for k, v in bert_tokenizer(prev, return_tensors="pt").items()}
cur_inputs = {k:v.to(device) for k, v in bert_tokenizer(cur, return_tensors="pt").items()}
prev_outputs = bert_model(**prev_inputs, return_dict=True)
cur_outputs = bert_model(**cur_inputs, return_dict=True)
prev_pooler_output = prev_outputs.pooler_output.cpu().numpy()
cur_pooler_output = cur_outputs.pooler_output.cpu().numpy()
am_scores_turn_level.append(cosine_similarity(prev_pooler_output, cur_pooler_output)[0][0])
am_scores_dialog_level.append(np.mean(am_scores_turn_level))
cutoff = np.quantile(am_scores_dialog_level, 0.05)
modified_rating = np.array([cutoff if t < cutoff else t for t in am_scores_dialog_level])
normed_am_scores_dialog_level = (modified_rating - cutoff) / | np.abs(cutoff) | numpy.abs |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 09:25:46 2017
@author: ben
"""
import numpy as np
import scipy.sparse as sp
from LSsurf.fd_grid import fd_grid
class lin_op:
def __init__(self, grid=None, row_0=0, col_N=None, col_0=None, name=None):
# a lin_op is an operator that represents a set of linear equations applied
# to the nodes of a grid (defined in fd_grid.py)
if col_0 is not None:
self.col_0=col_0
elif grid is not None:
self.col_0=grid.col_0
self.col_N=None
if col_N is not None:
self.col_N=col_N
elif grid is not None:
self.col_N=grid.col_N
self.row_0=row_0
self.N_eq=0
self.name=name
self.id=None
self.r=np.array([], dtype=int)
self.c=np.array([], dtype=int)
self.v=np.array([], dtype=float)
self.ind0=np.zeros([0], dtype=int)
self.TOC={'rows':dict(),'cols':dict()}
self.grid=grid
self.dst_grid=None
self.dst_ind0=None
self.expected=None
self.shape=None
self.size=None
    def __update_size_and_shape__(self):
        # Refresh the cached (rows, cols) shape after N_eq or col_N changes.
        # NOTE(review): despite the name, self.size is not updated here —
        # confirm whether that is intentional.
        self.shape = (self.N_eq, self.col_N)
    def diff_op(self, delta_subs, vals, which_nodes=None, valid_equations_only=True):
        """Build a finite-difference stencil operator over self.grid.

        Parameters
        ----------
        delta_subs : sequence of offset lists, one per grid dimension,
            giving the stencil's index offsets from each center node.
        vals : stencil coefficients, one entry per offset.
        which_nodes : optional global node indices; restricts the stencil
            to equations centered on those nodes.
        valid_equations_only : if True, only nodes whose full stencil fits
            inside the grid generate equations; if False, all nodes do, and
            out-of-bounds contributions are zeroed.

        Returns
        -------
        self, with r/c/v triplets, ind0, N_eq and the TOC populated.
        """
        # build an operator that calculates linear combination of the surrounding
        # values at each node of a grid.
        # A template, given by delta_subs and vals contains a list of offsets
        # in each direction of the grid, and a list of values corresponding
        # to each offset.  Only those nodes for which the template falls
        # entirely inside the grid are included in the operator
        if valid_equations_only:
            # compute the maximum and minimum offset in each dimension.  These
            # will be used to eliminate equations that extend outside the model
            # domain
            max_deltas=[np.max(delta_sub) for delta_sub in delta_subs]
            min_deltas=[np.min(delta_sub) for delta_sub in delta_subs]
        else:
            # treat the maximum and minimum offset in each dimension as zero,
            # so no equations are truncated
            max_deltas=[0 for delta_sub in delta_subs]
            min_deltas=[0 for delta_sub in delta_subs]
        #generate the center-node indices for each calculation
        # if in dimension k, min_delta=-a and max_delta = +b, the number of indices is N,
        # then the first valid center is a and the last is N-b
        sub0s=np.meshgrid(*[np.arange(np.maximum(0, -min_delta), np.minimum(Ni, Ni-max_delta)) for Ni, min_delta, max_delta in zip(self.grid.shape, min_deltas, max_deltas)], indexing='ij')
        sub0s=[sub.ravel() for sub in sub0s]
        if which_nodes is not None:
            # keep only the centers whose global index is in which_nodes
            temp_mask=np.in1d(self.grid.global_ind(sub0s), which_nodes)
            sub0s=[temp[temp_mask] for temp in sub0s]
        # one (row, col, value) entry per center node and stencil offset
        self.r, self.c=[np.zeros((len(sub0s[0]), len(delta_subs[0])), dtype=int) for _ in range(2)]
        self.v=np.zeros_like(self.r, dtype=float)
        self.N_eq=len(sub0s[0])
        # loop over offsets
        for ii in range(len(delta_subs[0])):
            # build a list of subscripts over dimensions
            this_sub=[sub0+delta[ii] for sub0, delta in zip(sub0s, delta_subs)]
            self.r[:,ii]=self.row_0+np.arange(0, self.N_eq, dtype=int)
            if valid_equations_only:
                self.c[:,ii]=self.grid.global_ind(this_sub)
                self.v[:,ii]=vals[ii].ravel()
            else:
                # need to remove out-of-bound subscripts
                self.c[:,ii], valid_ind=self.grid.global_ind(this_sub, return_valid=True)
                self.v[:,ii]=vals[ii].ravel()*valid_ind.ravel()
        #if not valid_equations_only: [Leave this commented until it causes a problem]
        #    # remove the elements that have v=0
        #    nonzero_v = self.v.ravel() != 0
        #    self.r = self.r.ravel()[nonzero_v]
        #    self.c = self.c.ravel()[nonzero_v]
        #    self.v = self.v.ravel()[nonzero_v]
        self.ind0 = self.grid.global_ind(sub0s).ravel()
        self.TOC['rows'] = {self.name:range(self.N_eq)}
        self.TOC['cols'] = {self.grid.name:np.arange(self.grid.col_0, self.grid.col_0+self.grid.N_nodes)}
        self.__update_size_and_shape__()
        return self
def add(self, op):
# combine a set of operators into a composite operator by adding them.
# the same thing could be accomplished by converting the operators to
# sparse arrays and adding the arrays, but this method keeps track of the
# table of contents for the operators.
# if a list of operators is provided, all are added together, or a single
# operator can be added to an existing operator.
if isinstance(op, list) or isinstance(op, tuple):
for this_op in op:
op.add(self, this_op)
return self
if self.r is not None:
self.r=np.append(self.r, op.r)
self.c=np.append(self.c, op.c)
self.v=np.append(self.v, op.v)
self.ind0=np.append(self.ind0, op.ind0)
else:
self.r=op.r
self.c=op.c
self.v=op.v
self.ind0=op.ind0
# assume that the new op may have columns that aren't in self.cols, and
# add any new columns to the table of contents
for key in op.TOC['cols'].keys():
self.TOC['cols'][key]=op.TOC['cols'][key]
self.col_N=np.maximum(self.col_N, op.col_N)
self.__update_size_and_shape__()
return self
    def interp_mtx(self, pts):
        """Build a bilinear (multi-linear) interpolation operator.

        Multiplying the resulting operator by a vector of nodal values gives
        the interpolated values at *pts* (one array of coordinates per grid
        dimension). Handles 1-, 2- and 3-D grids.
        """
        # create a matrix that, when it multiplies a set of nodal values,
        # gives the bilinear interpolation between those nodes at a set of
        # data points
        pts=[pp.ravel() for pp in pts]
        # Identify the nodes surrounding each data point
        # The floating-point subscript expresses the point locations in terms
        # of their grid positions
        ii=self.grid.float_sub(pts)
        cell_sub=self.grid.cell_sub_for_pts(pts)
        # calculate the fractional part of each cell_sub
        i_local=[a-b for a, b in zip(ii,cell_sub)]
        # find the index of the node below each data point
        global_ind=self.grid.global_ind(cell_sub)
        # make a list of dimensions based on the dimensions of the grid
        if self.grid.N_dims==1:
            list_of_dims=np.mgrid[0:2]
        elif self.grid.N_dims==2:
            list_of_dims=np.mgrid[0:2, 0:2]
        elif self.grid.N_dims==3:
            list_of_dims=np.mgrid[0:2, 0:2, 0:2]
        delta_ind=np.c_[[kk.ravel() for kk in list_of_dims]]
        n_neighbors=delta_ind.shape[1]
        Npts=len(pts[0])
        rr=np.zeros([Npts, n_neighbors], dtype=int)
        cc=np.zeros([Npts, n_neighbors], dtype=int)
        vv= np.ones([Npts, n_neighbors], dtype=float)
        # make lists of row and column indices and weights for the nodes
        # NOTE(review): the loop variable `ii` below shadows the float
        # subscripts computed above — confirm this is intentional.
        for ii in range(n_neighbors):
            rr[:,ii]=np.arange(len(pts[0]), dtype=int)
            cc[:,ii]=global_ind+np.sum(self.grid.stride*delta_ind[:,ii])
            for dd in range(self.grid.N_dims):
                # weight falls off linearly with distance from each corner node
                if delta_ind[dd, ii]==0:
                    vv[:,ii]*=(1.-i_local[dd])
                else:
                    vv[:,ii]*=i_local[dd]
        self.r=rr
        self.c=cc
        self.v=vv
        self.N_eq=Npts
        # in this case, sub0s is the index of the data points
        self.ind0=np.arange(0, Npts, dtype='int')
        # report the table of contents
        self.TOC['rows']={self.name:np.arange(self.N_eq, dtype='int')}
        self.TOC['cols']={self.grid.name:np.arange(self.grid.col_0, self.grid.col_0+self.grid.N_nodes)}
        self.__update_size_and_shape__()
        return self
def grad(self, DOF='z'):
coeffs=np.array([-1., 1.])/(self.grid.delta[0])
dzdx=lin_op(self.grid, name='d'+DOF+'_dx').diff_op(([0, 0],[-1, 0]), coeffs)
dzdy=lin_op(self.grid, name='d'+DOF+'_dy').diff_op(([-1, 0],[0, 0]), coeffs)
self.vstack((dzdx, dzdy))
self.__update_size_and_shape__()
return self
    def grad_dzdt(self, DOF='z', t_lag=1):
        """Stack mixed space-time second derivatives (d2/dxdt, d2/dydt) of *DOF*.

        *t_lag* is the temporal offset (in grid steps) of the time difference.
        """
        coeffs=np.array([-1., 1., 1., -1.])/(t_lag*self.grid.delta[0]*self.grid.delta[2])
        d2zdxdt=lin_op(self.grid, name='d2'+DOF+'_dxdt').diff_op(([ 0, 0, 0, 0], [-1, 0, -1, 0], [-t_lag, -t_lag, 0, 0]), coeffs)
        d2zdydt=lin_op(self.grid, name='d2'+DOF+'_dydt').diff_op(([-1, 0, -1, 0], [ 0, 0, 0, 0], [-t_lag, -t_lag, 0, 0]), coeffs)
        self.vstack((d2zdxdt, d2zdydt))
        self.__update_size_and_shape__()
        return self
def diff(self, lag=1, dim=0):
coeffs=np.array([-1., 1.])/(lag*self.grid.delta[dim])
deltas=[[0, 0] for this_dim in range(self.grid.N_dims)]
deltas[dim]=[0, lag]
self.diff_op((deltas), coeffs)
self.__update_size_and_shape__()
return self
    def dzdt(self, lag=1, DOF='dz'):
        """First time derivative over *lag* time steps.

        Also shifts the destination grid by half a lag in time via
        update_dst_grid (defined elsewhere), presumably to center the
        derivative between the two samples — TODO confirm.
        """
        coeffs=np.array([-1., 1.])/(lag*self.grid.delta[2])
        self.diff_op(([0, 0], [0, 0], [0, lag]), coeffs)
        self.__update_size_and_shape__()
        self.update_dst_grid([0, 0, 0.5*lag*self.grid.delta[2]], np.array([1, 1, 1]))
        return self
    def d2z_dt2(self, DOF='dz', t_lag=1):
        # Second time derivative: stencil [-1, 2, -1] scaled by (t_lag*dt)^2.
        # NOTE(review): `self` is rebound to a fresh lin_op here, so the
        # operator this method was called on is NOT modified (unlike grad/
        # diff, which mutate in place); callers must use the return value.
        # Confirm this asymmetry is intentional.
        coeffs=np.array([-1, 2, -1])/((t_lag*self.grid.delta[2])**2)
        self=lin_op(self.grid, name='d2'+DOF+'_dt2').diff_op(([0,0,0], [0,0,0], [-t_lag, 0, t_lag]), coeffs)
        self.__update_size_and_shape__()
        return self
    def grad2(self, DOF='z'):
        """Stack second spatial derivatives (d2/dx2, d2/dy2, d2/dxdy) of *DOF*."""
        coeffs=np.array([-1., 2., -1.])/(self.grid.delta[0]**2)
        d2zdx2=lin_op(self.grid, name='d2'+DOF+'_dx2').diff_op(([0, 0, 0],[-1, 0, 1]), coeffs)
        d2zdy2=lin_op(self.grid, name='d2'+DOF+'_dy2').diff_op(([-1, 0, 1],[0, 0, 0]), coeffs)
        # cross derivative uses a 4-point stencil with +/-0.5 weights
        d2zdxdy=lin_op(self.grid, name='d2'+DOF+'_dxdy').diff_op(([-1, -1, 1,1],[-1, 1, -1, 1]), 0.5*np.array([-1., 1., 1., -1])/(self.grid.delta[0]**2))
        self.vstack((d2zdx2, d2zdy2, d2zdxdy))
        self.__update_size_and_shape__()
        return self
    def grad2_dzdt(self, DOF='z', t_lag=1):
        """Stack third derivatives of *DOF*: d3/dx2dt, d3/dy2dt, d3/dxdydt.

        Each is the time difference (offset *t_lag*) of the corresponding
        second spatial derivative.
        """
        coeffs=np.array([-1., 2., -1., 1., -2., 1.])/(t_lag*self.grid.delta[0]**2.*self.grid.delta[2])
        d3zdx2dt=lin_op(self.grid, name='d3'+DOF+'_dx2dt').diff_op(([0, 0, 0, 0, 0, 0],[-1, 0, 1, -1, 0, 1], [-t_lag,-t_lag,-t_lag, 0, 0, 0]), coeffs)
        d3zdy2dt=lin_op(self.grid, name='d3'+DOF+'_dy2dt').diff_op(([-1, 0, 1, -1, 0, 1], [0, 0, 0, 0, 0, 0], [-t_lag, -t_lag, -t_lag, 0, 0, 0]), coeffs)
        coeffs=np.array([-1., 1., 1., -1., 1., -1., -1., 1.])/(self.grid.delta[0]**2*self.grid.delta[2])
        d3zdxdydt=lin_op(self.grid, name='d3'+DOF+'_dxdydt').diff_op(([-1, 0, -1, 0, -1, 0, -1, 0], [-1, -1, 0, 0, -1, -1, 0, 0], [-t_lag, -t_lag, -t_lag, -t_lag, 0, 0, 0, 0]), coeffs)
        self.vstack((d3zdx2dt, d3zdy2dt, d3zdxdydt))
        self.__update_size_and_shape__()
        return self
    def normalize_by_unit_product(self, wt=1):
        """Scale each equation by wt / (sum of its coefficients' magnitudes).

        Rows whose absolute-coefficient sum is zero are left unscaled.
        Uses toCSR (defined elsewhere) to form the row sums.
        """
        # normalize an operator by its magnitude's product with a vector of ones.
        # optionally rescale the result by a factor of wt
        unit_op=lin_op(col_N=self.col_N)
        unit_op.N_eq=self.N_eq
        unit_op.r, unit_op.c, unit_op.v = [self.r, self.c, np.abs(self.v)]
        unit_op.__update_size_and_shape__()
        # row sums of |v|: |A| . 1
        norm = unit_op.toCSR(row_N=unit_op.N_eq).dot(np.ones(self.shape[1]))
        scale = np.zeros_like(norm)
        scale[norm>0] = 1./norm[norm>0]
        self.v *= scale[self.r]*wt
def mean_of_bounds(self, bds, mask=None):
# make a linear operator that calculates the mean of all points
# in its grid that fall within bounds specified by 'bnds', If an
# empty matrix is specified for a dimension, the entire dimension is
# included.
# optionally, a 'mask' variable can be used to select from within the
# bounds.
coords=np.meshgrid(*self.grid.ctrs, indexing='ij')
in_bds=np.ones_like(coords[0], dtype=bool)
for dim, bnd in enumerate(bds):
if bds[dim] is not None:
in_bds=np.logical_and(in_bds, np.logical_and(coords[dim]>=bnd[0], coords[dim] <= bnd[1]));
if mask is not None:
in_bds=np.logical_and(in_bds, mask)
self.c=self.grid.global_ind(np.where(in_bds))
self.r=np.zeros(in_bds.ravel().sum(), dtype=int)
self.v=np.ones(in_bds.ravel().sum(), dtype=float)/np.sum(in_bds.ravel())
self.TOC['rows']={self.name:self.r}
self.TOC['cols']={self.name:self.c}
self.N_eq=1.
self.__update_size_and_shape__()
return self
def mean_of_mask(self, mask, dzdt_lag=None):
# make a linear operator that takes the mean of points multiplied by
# a 2-D mask. If the grid has a time dimension, the operator takes the
# mean for each time slice. If dzdt_lags are provided, it takes the
# mean dzdt as a function of time
coords=np.meshgrid(*self.grid.ctrs[0:2], indexing='ij')
mask_g=mask.interp(coords[1], coords[0])
mask_g[~np.isfinite(mask_g)]=0
i0, j0 = | np.nonzero(mask_g) | numpy.nonzero |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 26 11:38:14 2021
@author: christian
"""
from astropy import constants as const
from astropy.io import fits
from astropy.convolution import Gaussian1DKernel, convolve
import datetime as dt
import math
import matplotlib.backends.backend_pdf
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, AutoMinorLocator
import numpy as np
from scipy.optimize import curve_fit
import scipy.stats as stats
from spectres import spectres
from tqdm import tqdm
import unyt as u
import warnings
def add_weight(line_pos, line_wid, w, err, pix_wid):
    """Compute inverse-variance pixel weights inside a spectral-line window.

    Pixels entirely outside [line_pos - line_wid, line_pos + line_wid] get
    weight 0; the two pixels straddling the window edges are scaled by
    their overlapping fraction; the rest keep their inverse-variance
    weight. Weights are normalized so the largest is 1.

    Parameters
    ----------
    line_pos : float
        Position of the absorption line.
    line_wid : float
        Half-width of the line window.
    w : array-like
        Wavelength values around the line.
    err : array-like
        Corresponding flux uncertainties.
    pix_wid : float
        Width of a pixel in wavelength units.

    Returns
    -------
    weight : ndarray
        Per-pixel weights for the wavelength array, max-normalized to 1.
    npix : float
        (Fractional) number of pixels inside the window.
    """
    npix = len(w)
    # Start from inverse-variance weighting for every pixel.
    weight = 1.0 / np.square(err)
    win_left = line_pos - line_wid
    win_right = line_pos + line_wid
    lo = 0
    hi = -1
    # Zero out pixels lying entirely below the window.
    while w[lo] + pix_wid / 2 < win_left:
        weight[lo] = 0.0
        lo += 1
        npix -= 1
    # Zero out pixels lying entirely above the window.
    while w[hi] - pix_wid / 2 > win_right:
        weight[hi] = 0.0
        hi -= 1
        npix -= 1
    # Edge pixels only partially overlap the window: scale their weight
    # (and their contribution to the pixel count) by the overlap fraction.
    frac_lo = (w[lo] + pix_wid / 2 - line_pos + line_wid) / pix_wid
    frac_hi = (pix_wid / 2 + line_pos + line_wid - w[hi]) / pix_wid
    weight[lo] = weight[lo] * frac_lo
    weight[hi] = weight[hi] * frac_hi
    npix = npix - 2.0 + frac_hi + frac_lo
    # Normalize so the largest weight equals 1.
    weight = np.divide(weight, max(weight))
    return weight, npix
def addSN(flux, time, vmag, DarkN, SkyN, n, norm_f, Boff=0.654, Roff=-0.352,
Ioff=-0.7, HARSN=1000, HAR=False):
"""Adds noice to the inserted flux. The noise is dependent on the
brightness of the target, the observation time, the dark noice and the
sky noice. It simulates noice for a solar star. This simulates noise for
a HERMES spectrum according to the capabilities of the spectrograph and
telescope.
Parameters
----------
flux : Array like
An array holding the flux.
time : float
Observing time (s).
vmag : float
Brightness in the V band (mag).
DarkN : float
Dark noise total photon count.
SkyN : float
Relative sky brightness.
n : int
Band identifier (0: B, 1: V, 2: R, 3: IR).
norm_f : Array like
Normalised flux array.
Boff : float
B band offset from V band (mag). Solar offset by default.
Roff : float
R band offset from V band (mag). Solar offset by default.
Ioff : float
IR band offset from V band (mag). Solar offset by default.
HARSN : float
Previous SNR in the original HARPS spectrum.
(negligible by default)
HAR : Boolean
Has been a HARPS spectrum before. Will take into account previous
noise of spectrum.
Returns
-------
A variable:
results : Library
Contains:
'SN' keyword for the resulting SN as a float
'SNpp' keyword for SN per pixel as a float
'e' keyword for the error numpy array
'f' keyword for the new flux array
"""
results = {}
# Determine the number of electrons observed in the specified band
if n == 0:
ne = time / 3600 * 10**(-0.4 * (0.993 * (vmag + Boff) - 24.05))
nepp = ne / 3.81 # number of measured electrons per pixel
# Find the SNR of the initial HARPS spectrum for the wavelength region.
# Increases SNR per pixel for HERMES cause of larger pixels
try:
harSN = min(HARSN[31:36]) * 2
except TypeError:
harSN = HARSN * 2
harSNpp = harSN / 3.81 # HARPS SNR per HERMES pixel
elif n == 1:
ne = time / 3600 * 10**(-0.4*(1.18 * vmag - 26.25))
nepp = ne / 4.69
try:
harSN = min(HARSN[52:56]) * 2
except TypeError:
harSN = HARSN * 2
harSNpp = harSN / 4.69
elif n == 2:
ne = time / 3600 * 10**(-0.4*(1.07 * (vmag + Roff) - 24.98))
nepp = ne / 3.74
try:
harSN = min(HARSN[66:70]) * 2
except TypeError:
harSN = HARSN * 2
harSNpp = harSN / 3.74
elif n == 3:
ne = time / 3600 * 10**(-0.4*(0.89 * (vmag + Ioff) - 22.33))
nepp = ne / 3.74
harSN = HARSN * 2
harSNpp = harSN / 3.74
# Calculate the SNR (and SNR per pixel) and the number of sky pixel.
skypp = SkyN * nepp * pow(2.5, vmag-17.552)
SN = np.sqrt(ne)
SNpp = math.sqrt(nepp + skypp)
# Compute results for HARPS spectra (calculate individual uncertainties and
# add random noise to the spectrum)
if HAR:
if harSN < SN:
results['e'] = np.abs(np.divide(flux,
np.sqrt(np.abs(norm_f))) / harSNpp)
results['f'] = flux + DarkN * flux / ne
results['SN'] = harSN
results['SNpp'] = harSNpp
else:
SNadd = 1/math.sqrt(1/(SNpp**2) + 1/(harSNpp**2))
adderr = flux / SNadd
results['f'] = np.add(flux, np.random.normal(0, adderr,
len(flux))) + DarkN * flux / ne
results['e'] = np.abs(np.divide(flux,
np.sqrt( | np.abs(norm_f) | numpy.abs |
import random
from typing import Optional, List, Union
import numpy as np
from stable_baselines.common.segment_tree import SumSegmentTree, MinSegmentTree
from stable_baselines.common.vec_env import VecNormalize
class ReplayBuffer(object):
    """FIFO ring buffer of transitions, with optional named extra data per step."""
    __name__ = "ReplayBuffer"
    def __init__(self, size: int, extra_data_names=()):
        """
        Implements a ring buffer (FIFO).

        :param size: (int) Max number of transitions to store in the buffer. When the buffer overflows the old
            memories are dropped.
        :param extra_data_names: (tuple) names of additional per-transition fields;
            stored sorted so that positional and keyword extra data passed to
            `add` line up with the names in `_encode_sample`.
        """
        self._storage = []
        self._maxsize = int(size)
        self._next_idx = 0
        self._extra_data_names = sorted(extra_data_names)
    def __len__(self) -> int:
        return len(self._storage)
    @property
    def storage(self):
        """[(Union[np.ndarray, int], Union[np.ndarray, int], float, Union[np.ndarray, int], bool)]: content of the replay buffer"""
        return self._storage
    @property
    def buffer_size(self) -> int:
        """int: Max capacity of the buffer"""
        return self._maxsize
    def can_sample(self, n_samples: int) -> bool:
        """
        Check if n_samples samples can be sampled
        from the buffer.

        :param n_samples: (int)
        :return: (bool)
        """
        return len(self) >= n_samples
    def is_full(self) -> int:
        """
        Check whether the replay buffer is full or not.

        :return: (bool)
        """
        return len(self) == self.buffer_size
    def add(self, obs_t, action, reward, obs_tp1, done, *extra_data, **extra_data_kwargs):
        """
        add a new transition to the buffer

        :param obs_t: (Union[np.ndarray, int]) the last observation
        :param action: (Union[np.ndarray, int]) the action
        :param reward: (float) the reward of the transition
        :param obs_tp1: (Union[np.ndarray, int]) the current observation
        :param done: (bool) is the episode done

        Extra data may be passed positionally and/or by keyword; keyword
        extras are appended in sorted-name order so the stored tuple matches
        the order of self._extra_data_names used when decoding.
        """
        data = (obs_t, action, reward, obs_tp1, done, *extra_data,
                *[extra_data_kwargs[k] for k in sorted(extra_data_kwargs)])
        # Ring-buffer write: append while growing, overwrite once full.
        if self._next_idx >= len(self._storage):
            self._storage.append(data)
        else:
            self._storage[self._next_idx] = data
        self._next_idx = (self._next_idx + 1) % self._maxsize
    def extend(self, obs_t, action, reward, obs_tp1, done):
        """
        add a new batch of transitions to the buffer

        :param obs_t: (Union[Tuple[Union[np.ndarray, int]], np.ndarray]) the last batch of observations
        :param action: (Union[Tuple[Union[np.ndarray, int]]], np.ndarray]) the batch of actions
        :param reward: (Union[Tuple[float], np.ndarray]) the batch of the rewards of the transition
        :param obs_tp1: (Union[Tuple[Union[np.ndarray, int]], np.ndarray]) the current batch of observations
        :param done: (Union[Tuple[bool], np.ndarray]) terminal status of the batch

        Note: uses the same names as .add to keep compatibility with named argument passing
            but expects iterables and arrays with more than 1 dimensions
        NOTE(review): unlike `add`, this does not accept extra data fields —
            confirm that batch insertion is never used with extra_data_names.
        """
        for data in zip(obs_t, action, reward, obs_tp1, done):
            if self._next_idx >= len(self._storage):
                self._storage.append(data)
            else:
                self._storage[self._next_idx] = data
            self._next_idx = (self._next_idx + 1) % self._maxsize
    @staticmethod
    def _normalize_obs(obs: np.ndarray,
                       env: Optional[VecNormalize] = None) -> np.ndarray:
        """
        Helper for normalizing the observation.
        """
        if env is not None:
            return env.normalize_obs(obs)
        return obs
    @staticmethod
    def _normalize_reward(reward: np.ndarray,
                          env: Optional[VecNormalize] = None) -> np.ndarray:
        """
        Helper for normalizing the reward.
        """
        if env is not None:
            return env.normalize_reward(reward)
        return reward
    def _encode_sample(self, idxes: Union[List[int], np.ndarray], env: Optional[VecNormalize] = None):
        # Gather the transitions at `idxes` into batched arrays; extra fields
        # are collected into a dict keyed by their (sorted) names.
        obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
        extra_data = {name: [] for name in self._extra_data_names}
        for i in idxes:
            data = self._storage[i]
            obs_t, action, reward, obs_tp1, done, *extra_timestep_data = data
            obses_t.append(np.array(obs_t, copy=False))
            actions.append(np.array(action, copy=False))
            rewards.append(reward)
            obses_tp1.append(np.array(obs_tp1, copy=False))
            dones.append(done)
            for data_i, extra_data_name in enumerate(self._extra_data_names):
                data = extra_timestep_data[data_i]
                # Scalars are stored as-is; array-likes are wrapped without copying.
                if np.ndim(data) == 0:
                    extra_data[extra_data_name].append(data)
                else:
                    extra_data[extra_data_name].append(np.array(data, copy=False))
        extra_data = {k: np.array(v) for k, v in extra_data.items()}
        return self._normalize_obs(np.array(obses_t), env), np.array(actions), \
               self._normalize_reward(np.array(rewards), env), self._normalize_obs(np.array(obses_tp1), env), \
               np.array(dones), extra_data
    def sample(self, batch_size: int, env: Optional[VecNormalize] = None, **_kwargs):
        """
        Sample a batch of experiences.

        :param batch_size: (int) How many transitions to sample.
        :param env: (Optional[VecNormalize]) associated gym VecEnv
            to normalize the observations/rewards when sampling
        :return:
            - obs_batch: (np.ndarray) batch of observations
            - act_batch: (numpy float) batch of actions executed given obs_batch
            - rew_batch: (numpy float) rewards received as results of executing act_batch
            - next_obs_batch: (np.ndarray) next set of observations seen after executing act_batch
            - done_mask: (numpy bool) done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode
                and 0 otherwise.
        """
        # Uniform sampling with replacement over the stored transitions.
        idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
        return self._encode_sample(idxes, env=env)
# TODO: scan/"burn in"
class RecurrentReplayBuffer(ReplayBuffer):
    """Episode-based replay buffer for recurrent policies.

    Stores complete episodes and samples contiguous subsequences of
    ``sequence_length`` timesteps, optionally preceded by ``scan_length``
    "burn-in" timesteps used only to warm up RNN state.  When ``her_k > 0``,
    obs/reward/obs_tp1 are stored as lists so hindsight (HER) variants can be
    appended via :meth:`add_her`.

    NOTE(review): episodes shorter than sequence_length + scan_length are
    silently discarded by store_episode().
    """
    __name__ = "RecurrentReplayBuffer"
    def __init__(self, size, sequence_length=1, scan_length=0, extra_data_names=(), rnn_inputs=(), her_k=4):
        """
        :param size: (int) max number of transitions kept in the buffer
        :param sequence_length: (int) length of subsequences returned by sample()
        :param scan_length: (int) number of burn-in timesteps preceding each sequence
        :param extra_data_names: (tuple) names of additional per-timestep data
        :param rnn_inputs: (tuple) data names fed to the RNN during the burn-in scan
        :param her_k: (int) number of hindsight goals per timestep (0 disables HER)
        """
        super().__init__(size)
        # Counts stored transitions (incl. HER variants); drives ring-buffer wrap.
        self._sample_cycle = 0
        self.her_k = her_k
        self._extra_data_names = sorted(extra_data_names)
        # Maps a data name to its index inside each per-timestep data list.
        self._data_name_to_idx = {"obs": 0, "action": 1, "reward": 2, "obs_tp1": 3, "done": 4,
                                  **{name: 5 + i for i, name in enumerate(self._extra_data_names)}}
        self._current_episode_data = []
        self.sequence_length = sequence_length
        assert self.sequence_length >= 1
        self.scan_length = scan_length
        self._rnn_inputs = rnn_inputs
        # A non-zero burn-in only makes sense if we know which inputs to scan.
        assert self.scan_length == 0 or len(self._rnn_inputs) > 0
        self._is_full = False
    def add(self, obs_t, action, reward, obs_tp1, done, *extra_data, **extra_data_kwargs):
        """Append one transition to the in-progress episode; commit it on done.

        With HER enabled, obs/reward/obs_tp1 are wrapped in lists so hindsight
        variants can be appended later by add_her().
        """
        if self.her_k > 0:
            obs_t = [obs_t]
            obs_tp1 = [obs_tp1]
            reward = [reward]
        data = [obs_t, action, reward, obs_tp1, done, *extra_data, *[extra_data_kwargs[k] for k in sorted(extra_data_kwargs)]] # Data needs to be mutable
        self._current_episode_data.append(data)
        self._sample_cycle += 1
        if done:
            self.store_episode()
    def store_episode(self):
        """Commit the in-progress episode to storage (ring-buffer style).

        Episodes too short to yield a full burn-in + sequence are dropped and
        their transitions are subtracted from the running count.
        """
        if len(self._current_episode_data) >= self.sequence_length + self.scan_length:
            if self._sample_cycle >= self.buffer_size:
                # Wrap around: start overwriting the oldest episodes.
                self._next_idx = 0
                self._sample_cycle = 0
                self._is_full = True
            if not self._is_full:
                self._storage.append(self._current_episode_data)
            else:
                try:
                    self._storage[self._next_idx] = self._current_episode_data
                except IndexError:
                    self._storage.append(self._current_episode_data)
                self._next_idx += 1
        else:
            # Episode discarded: roll back its contribution to the count.
            if self.her_k > 0:
                # Each timestep may hold the original plus HER observations.
                self._sample_cycle -= sum([len(t[0]) for t in self._current_episode_data])
            else:
                self._sample_cycle -= len(self._current_episode_data)
        self._current_episode_data = []
    def add_her(self, obs, obs_tp1, reward, timestep, ep_index=None):
        """Append a hindsight (goal-relabelled) variant of a stored timestep.

        :param ep_index: (Optional[int]) index of a stored episode, or None for
            the episode currently being collected
        """
        assert self.her_k > 0
        if ep_index is not None:
            episode_data = self._storage[ep_index]
        else:
            episode_data = self._current_episode_data
        episode_data[timestep][0].append(obs)
        episode_data[timestep][2].append(reward)
        episode_data[timestep][3].append(obs_tp1)
        self._sample_cycle += 1
    def sample(self, batch_size, sequence_length=None, **_kwargs):
        """Sample batch_size timesteps as batch_size // sequence_length
        contiguous sequences, each with an optional scan_length burn-in prefix.
        """
        if sequence_length is None:
            sequence_length = self.sequence_length
        assert batch_size % sequence_length == 0
        ep_idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size // sequence_length)]
        # Start index in each episode, leaving room for burn-in and sequence.
        ep_ts = [random.randint(self.scan_length, len(self._storage[ep_i]) - 1 - (sequence_length - 1)) for ep_i in
                 ep_idxes]
        extra_data = {name: [] for name in self._extra_data_names}
        extra_data.update({"scan_{}".format(name): [] for name in self._rnn_inputs})
        obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
        for i, ep_i in enumerate(ep_idxes):
            # NOTE(review): self.storage here vs self._storage elsewhere —
            # presumably `storage` is a base-class property; verify.
            ep_data = self.storage[ep_i]
            ep_t = ep_ts[i]
            if self.her_k > 0:
                # Pick one stored observation variant (original + HER).
                # NOTE(review): randint's upper bound is inclusive, so values
                # up to her_k + 2 can be drawn, past the last HER variant; the
                # IndexError fallback below then uses index 0 — confirm intended.
                her_idx = random.randint(0, self.her_k + 2)
            for scan_t in range(ep_t - self.scan_length, ep_t):
                for scan_data_name in self._rnn_inputs:
                    data = ep_data[scan_t][self._data_name_to_idx[scan_data_name]]
                    if self.her_k > 0 and self._data_name_to_idx[scan_data_name] in [self._data_name_to_idx[n] for n in
                                                                                     ["obs", "reward", "obs_tp1"]]:
                        # HER wraps these in lists; the scan uses the original.
                        data = data[0]
                    extra_data["scan_{}".format(scan_data_name)].append(data)
            for seq_i in range(sequence_length):
                obs_t, action, reward, obs_tp1, done, *extra_timestep_data = ep_data[ep_t + seq_i]
                if self.her_k > 0:
                    try: # TODO: fix indexing with last timestep data not having her data
                        obs_t, obs_tp1, reward = obs_t[her_idx], obs_tp1[her_idx], reward[her_idx]
                    except IndexError:
                        obs_t, obs_tp1, reward = obs_t[0], obs_tp1[0], reward[0]
                obses_t.append(np.array(obs_t, copy=False))
                actions.append(np.array(action, copy=False))
                rewards.append(reward)
                obses_tp1.append(np.array(obs_tp1, copy=False))
                dones.append(done)
                for data_i, extra_data_name in enumerate(self._extra_data_names):
                    if "state" in extra_data_name:
                        if seq_i > 0:  # For RNN states only get the first state in the sequence (or in the scan)
                            continue
                        data = ep_data[ep_t + seq_i - self.scan_length][self._data_name_to_idx[extra_data_name]]
                    else:
                        data = extra_timestep_data[data_i]
                    if np.ndim(data) == 0:
                        extra_data[extra_data_name].append(data)
                    else:
                        extra_data[extra_data_name].append(np.array(data, copy=False))
        extra_data = {k: np.array(v) for k, v in extra_data.items()}
        # Indices of the timestep following each sampled sequence, so updated
        # RNN states can be written back via update_state().
        extra_data["state_idxs"] = list(zip(ep_idxes, [t + sequence_length for t in ep_ts]))
        if self.scan_length > 0:
            extra_data["state_idxs_scan"] = list(zip(ep_idxes, ep_ts))
        return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones), extra_data
    def update_state(self, idxs, data):
        """Write recomputed RNN states back into stored transitions.

        :param idxs: iterable of (episode_index, timestep) pairs
        :param data: mapping of state name -> per-sample state array
        """
        for i, (ep_idx, t) in enumerate(idxs):
            try:
                for state_name, state_val in data.items():
                    self.storage[ep_idx][t][self._data_name_to_idx[state_name]] = state_val[i, :]
            except IndexError:  # Hidden state computed for last sample in episode, doesnt belong to any sample
                pass
    def __len__(self):  # TODO: consider if this is important enough to do right
        # Approximate number of sampleable transitions: excludes the episode
        # still being collected, which cannot be sampled yet.
        return max(self._sample_cycle - ((len(self._current_episode_data) - 1) * (1 + self.her_k) + 1), 0) \
            if not self._is_full else self.buffer_size
    def is_full(self):
        """Return True once the ring buffer has wrapped around at least once."""
        return self._is_full
# TODO: maybe add support for episode constant data
class EpisodicRecurrentReplayBuffer(ReplayBuffer):
    """Replay buffer that stores whole episodes and samples them in contiguous
    chunks of up to ``sequence_length`` timesteps for recurrent policies.

    Capacity passed to the base class is ``size // episode_length`` episodes.
    """
    __name__ = "EpisodicRecurrentReplayBuffer"

    def __init__(self, size, episode_length, sequence_length=10, extra_data_names=()):
        """
        :param size: (int) max number of transitions (converted to an episode count)
        :param episode_length: (int) nominal episode length used for the capacity
        :param sequence_length: (int) default chunk length returned by sample()
        :param extra_data_names: (tuple) names of additional per-timestep data
        """
        super().__init__(size // episode_length)
        self._current_episode_data = []
        # self._episode_data = []  # Data which is constant within episode
        self._extra_data_names = sorted(extra_data_names)
        # Maps a data name to its index inside each per-timestep data list.
        self._data_name_to_idx = {"obs": 0, "action": 1, "reward": 2, "obs_tp1": 3, "done": 4}
        self._data_name_to_idx.update({name: i + 5 for i, name in enumerate(self._extra_data_names)})
        self._sequence_length = sequence_length  # TODO: add scan length and assert is multiple of sample_consecutive_max

    def add(self, obs_t, action, reward, obs_tp1, done, *extra_data):
        """Append one transition to the in-progress episode; commit it on done."""
        self._current_episode_data.append(
            [obs_t, action, reward, obs_tp1, done, *extra_data])  # List to support updating states etc.
        if done:
            self.store_episode()

    def store_episode(self):
        """Commit the in-progress episode to storage (ring-buffer style)."""
        if len(self._current_episode_data) == 0:
            return
        if self._next_idx >= len(self._storage):
            self._storage.append(self._current_episode_data)
        else:
            self._storage[self._next_idx] = self._current_episode_data
        self._next_idx = (self._next_idx + 1) % self._maxsize
        self._current_episode_data = []

    def sample(self, batch_size, sequence_length=None):
        """Sample ``batch_size`` timesteps as contiguous chunks from random episodes.

        :return: (obs, actions, rewards, obs_tp1, dones, extra_data_dict) where
            extra_data_dict carries "state_idxs" (for writing RNN states back via
            update_state) and "state" subsampled to one entry per sequence.

        NOTE(review): assumes "state" is among extra_data_names; sampling with an
        empty buffer would loop forever.
        """
        if sequence_length is None:
            sequence_length = self._sequence_length
        samples_left = batch_size
        obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
        extra_data = [[] for i in range(len(self._extra_data_names))]
        state_idxs = []
        while samples_left > 0:
            # BUGFIX: np.random.randint has an *exclusive* upper bound, so the
            # previous bound of len(self._storage) - 1 could never pick the last
            # stored episode and raised ValueError with a single episode stored.
            ep_idx = np.random.randint(0, len(self._storage))
            ep_data = self._storage[ep_idx]
            ep_start_idx = np.random.randint(0, max(len(ep_data) - sequence_length, 1))
            # One extra timestep is kept so the RNN state *after* the sequence
            # can be addressed via state_idxs.
            ep_data = ep_data[ep_start_idx:ep_start_idx + sequence_length + 1]
            state_idxs.append((ep_idx, ep_start_idx + sequence_length))
            if len(ep_data) > samples_left:
                ep_data = ep_data[:samples_left]
            for j, timestep in enumerate(ep_data):
                obs_t, action, reward, obs_tp1, done, *extra_timestep_data = timestep
                obses_t.append(np.array(obs_t, copy=False))
                actions.append(np.array(action, copy=False))
                rewards.append(reward)
                obses_tp1.append(np.array(obs_tp1, copy=False))
                dones.append(done)
                for data_i, data in enumerate(extra_timestep_data):
                    if np.ndim(data) == 0:
                        extra_data[data_i].append(data)
                    else:
                        extra_data[data_i].append(np.array(data, copy=False))
            samples_left -= len(ep_data)
            assert samples_left >= 0
        extra_data_dict = {name: np.array(extra_data[i]) for i, name in enumerate(self._extra_data_names)}
        extra_data_dict["reset"] = np.zeros(shape=(batch_size,))  # np.array(resets)
        # Keep only the first RNN state of each sampled sequence.
        extra_data_dict["state"] = extra_data_dict["state"][::sequence_length]
        extra_data_dict["state_idxs"] = state_idxs
        return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(
            dones), extra_data_dict

    def update_state(self, idxs, data):
        """Write recomputed RNN states back into stored transitions.

        :param idxs: iterable of (episode_index, timestep) pairs
        :param data: a single state array, or a list [pi, qf1, qf2] of arrays
        """
        for i, (ep_idx, t) in enumerate(idxs):
            if isinstance(data, list):
                self.storage[ep_idx][t][self._data_name_to_idx["pi_state"]] = data[0][i, :]
                self.storage[ep_idx][t][self._data_name_to_idx["qf1_state"]] = data[1][i, :]
                self.storage[ep_idx][t][self._data_name_to_idx["qf2_state"]] = data[2][i, :]
            else:
                self.storage[ep_idx][t][self._data_name_to_idx["state"]] = data[i, :]

    def __len__(self):
        # NOTE(review): reports 0 unless more than one episode is stored —
        # presumably to gate sampling until at least two episodes exist;
        # confirm this is intended rather than `>= 1`.
        if len(self.storage) > 1:
            return sum([len(episode) for episode in self._storage])
        else:
            return 0
class DRRecurrentReplayBuffer(ReplayBuffer):
    """Episode replay buffer for domain-randomization settings.

    Stores, per episode, the randomization parameters ("my") alongside the
    transitions, supports hindsight goal relabelling (HER) on goals/rewards,
    and returns per-sample observation/action history for recurrent models.
    """
    __name__ = "DRRecurrentReplayBuffer"

    def __init__(self, size, episode_max_len, scan_length, her_k=4):
        """
        :param size: (int) max number of transitions (converted to an episode count)
        :param episode_max_len: (int) max episode length used for the capacity
        :param scan_length: (int) number of history timesteps returned per sample
        :param her_k: (int) number of hindsight goals per timestep (0 disables HER)
        """
        self.her_k = her_k
        super().__init__(size)
        self._scan_length = scan_length
        # Capacity is counted in episodes, not transitions.
        self._maxsize = self._maxsize // episode_max_len
        # BUGFIX: the episode under construction was never initialised, so the
        # first call to add() raised AttributeError.
        self._current_episode_data = []
        self._episode_my = []

    def add(self, obs_t, action, reward, obs_tp1, done, goal, my=None):
        """Append one transition; on done, commit the episode together with its
        domain-randomization parameters ``my`` (required on the final step).
        """
        assert not (done and my is None)
        if self.her_k > 0:
            # Wrap in lists so HER variants can be appended by add_her().
            goal = [goal]
            reward = [reward]
        data = (obs_t, action, reward, obs_tp1, done, goal)
        self._current_episode_data.append(data)
        if done:
            if self._next_idx >= len(self._storage):
                self._storage.append(self._current_episode_data)
                self._episode_my.append(my)
            else:
                self._storage[self._next_idx] = self._current_episode_data
                self._episode_my[self._next_idx] = my
            self._next_idx = (self._next_idx + 1) % self._maxsize
            self._current_episode_data = []

    def add_her(self, goal, reward, timestep, ep_index=None):
        """Append a hindsight goal/reward variant to a stored timestep.

        :param ep_index: (Optional[int]) stored episode index, or None for the
            episode currently being collected
        """
        assert self.her_k > 0
        if ep_index is not None:
            episode_data = self._storage[ep_index]
        else:
            episode_data = self._current_episode_data
        episode_data[timestep][5].append(goal)
        episode_data[timestep][2].append(reward)

    def sample(self, batch_size, **_kwargs):
        """Sample ``batch_size`` transitions (with HER goal variants when enabled)."""
        if self.her_k > 0:
            ep_idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
            # Flattened timestep index: each stored timestep accounts for the
            # original goal plus its her_k hindsight variants.
            ep_ts = [
                random.randint(self._scan_length * (1 + self.her_k), (len(self._storage[ep_i]) - 1) * (1 + self.her_k))
                for ep_i in ep_idxes]  # - self._optim_length)
            return self._encode_sample(ep_idxes, ep_ts)
        else:
            # NOTE(review): the base-class sample() calls _encode_sample(idxes,
            # env=...), which does not match this class's signature — the
            # her_k == 0 path looks broken; confirm before relying on it.
            return super(DRRecurrentReplayBuffer, self).sample(batch_size)

    def _encode_sample(self, ep_idxes, ep_ts):
        """Gather transitions plus per-sample observation/action history and the
        episode's randomization parameters.
        """
        obses_t, actions, rewards, obses_tp1, dones, goals, mys, hists_o, hists_a = [], [], [], [], [], [], [], [], []
        for i, ep_i in enumerate(ep_idxes):
            if self.her_k > 0:
                # Recover the timestep; the remainder picks the goal variant.
                ep_t = int(ep_ts[i] / (self.her_k + 1))
            else:
                ep_t = ep_ts[i]
            ep_data = self._storage[ep_i]
            obs_t, action, reward, obs_tp1, done, goal = ep_data[ep_t]
            if self.her_k > 0:
                goal = goal[ep_ts[i] - ep_t * (self.her_k + 1)]
                reward = reward[ep_ts[i] - ep_t * (self.her_k + 1)]
            if self._scan_length > 0:
                ep_scan_start = ep_t - self._scan_length if ep_t - self._scan_length >= 0 else 0
                hist_o, hist_a = [], []
                for hist_i in range(ep_scan_start, ep_t):
                    hist_o.append(np.array(ep_data[hist_i][0]))
                    if hist_i > 0:
                        hist_a.append(np.array(ep_data[hist_i - 1][1]))
                    else:
                        # No previous action at the episode start: use zeros.
                        hist_a.append(np.zeros(shape=(len(ep_data[0][1]),)))
                hist_o.append(np.array(obs_t))
                hist_a.append(np.array(ep_data[ep_t - 1][1]))
            else:
                hist_o = [obs_t]
                # NOTE(review): with ep_t == 0 this wraps to the episode's last
                # action; confirm that is acceptable.
                hist_a = [ep_data[ep_t - 1][1]]
            obses_t.append(np.array(obs_t, copy=False))
            actions.append(np.array(action, copy=False))
            rewards.append(reward)
            obses_tp1.append(np.array(obs_tp1, copy=False))
            dones.append(done)
            hists_o.extend(hist_o)
            hists_a.extend(hist_a)
            goals.append(np.array(goal, copy=False))
            mys.append(np.array(self._episode_my[ep_i], copy=False))
        return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones), {
            "goal": np.array(goals), "obs_rnn": np.array(hists_o), "action_prev": np.array(hists_a),
            "my": np.array(mys)}
class PrioritizedReplayBuffer(ReplayBuffer):
    """Proportional prioritized experience replay backed by segment trees."""
    __name__ = "PrioritizedReplayBuffer"

    def __init__(self, size, alpha):
        """
        Create Prioritized Replay buffer.

        See Also ReplayBuffer.__init__

        :param size: (int) Max number of transitions to store in the buffer. When the buffer overflows the old memories
            are dropped.
        :param alpha: (float) how much prioritization is used (0 - no prioritization, 1 - full prioritization)
        """
        super(PrioritizedReplayBuffer, self).__init__(size)
        assert alpha >= 0
        self._alpha = alpha
        # Segment trees require a power-of-two capacity.
        it_capacity = 1
        while it_capacity < size:
            it_capacity *= 2
        self._it_sum = SumSegmentTree(it_capacity)
        self._it_min = MinSegmentTree(it_capacity)
        self._max_priority = 1.0

    def add(self, obs_t, action, reward, obs_tp1, done):
        """
        add a new transition to the buffer

        :param obs_t: (Any) the last observation
        :param action: ([float]) the action
        :param reward: (float) the reward of the transition
        :param obs_tp1: (Any) the current observation
        :param done: (bool) is the episode done
        """
        idx = self._next_idx
        super().add(obs_t, action, reward, obs_tp1, done)
        # New transitions get the maximum priority seen so far, so they are
        # sampled at least once before being re-prioritized.
        self._it_sum[idx] = self._max_priority ** self._alpha
        self._it_min[idx] = self._max_priority ** self._alpha

    def extend(self, obs_t, action, reward, obs_tp1, done):
        """
        add a new batch of transitions to the buffer

        :param obs_t: (Union[Tuple[Union[np.ndarray, int]], np.ndarray]) the last batch of observations
        :param action: (Union[Tuple[Union[np.ndarray, int]]], np.ndarray]) the batch of actions
        :param reward: (Union[Tuple[float], np.ndarray]) the batch of the rewards of the transition
        :param obs_tp1: (Union[Tuple[Union[np.ndarray, int]], np.ndarray]) the current batch of observations
        :param done: (Union[Tuple[bool], np.ndarray]) terminal status of the batch

        Note: uses the same names as .add to keep compatibility with named argument passing
            but expects iterables and arrays with more than 1 dimensions
        """
        idx = self._next_idx
        super().extend(obs_t, action, reward, obs_tp1, done)
        # Walk the indices the base class just filled and give each max priority.
        while idx != self._next_idx:
            self._it_sum[idx] = self._max_priority ** self._alpha
            self._it_min[idx] = self._max_priority ** self._alpha
            idx = (idx + 1) % self._maxsize

    def _sample_proportional(self, batch_size):
        """Draw indices with probability proportional to stored priorities."""
        total = self._it_sum.sum(0, len(self._storage) - 1)
        # TODO(szymon): should we ensure no repeats?
        mass = np.random.random(size=batch_size) * total
        idx = self._it_sum.find_prefixsum_idx(mass)
        return idx

    def sample(self, batch_size: int, beta: float = 0, env: Optional[VecNormalize] = None):
        """
        Sample a batch of experiences.

        compared to ReplayBuffer.sample
        it also returns importance weights and idxes
        of sampled experiences.

        :param batch_size: (int) How many transitions to sample.
        :param beta: (float) To what degree to use importance weights (0 - no corrections, 1 - full correction).
            NOTE: the default of 0 is rejected by the assertion below; callers must pass beta > 0.
        :param env: (Optional[VecNormalize]) associated gym VecEnv
            to normalize the observations/rewards when sampling
        :return:
            - obs_batch: (np.ndarray) batch of observations
            - act_batch: (numpy float) batch of actions executed given obs_batch
            - rew_batch: (numpy float) rewards received as results of executing act_batch
            - next_obs_batch: (np.ndarray) next set of observations seen after executing act_batch
            - done_mask: (numpy bool) done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode
                and 0 otherwise.
            - extra_data: (dict) with "is_weights" (importance-sampling weights of
                shape (batch_size,)) and "idxs" (buffer indices of the samples)
        """
        assert beta > 0
        idxes = self._sample_proportional(batch_size)
        # Importance-sampling weights, normalized by the maximum weight.
        p_min = self._it_min.min() / self._it_sum.sum()
        max_weight = (p_min * len(self._storage)) ** (-beta)
        p_sample = self._it_sum[idxes] / self._it_sum.sum()
        weights = (p_sample * len(self._storage)) ** (-beta) / max_weight
        encoded_sample = self._encode_sample(idxes, env=env)
        # BUGFIX: the extra data must be appended as a single dict element —
        # concatenating a list with a dict raised TypeError.
        return tuple(list(encoded_sample) + [{"is_weights": weights, "idxs": idxes}])

    def update_priorities(self, idxes, priorities):
        """
        Update priorities of sampled transitions.

        sets priority of transition at index idxes[i] in buffer
        to priorities[i].

        :param idxes: ([int]) List of idxes of sampled transitions
        :param priorities: ([float]) List of updated priorities corresponding to transitions at the sampled idxes
            denoted by variable `idxes`.
        """
        assert len(idxes) == len(priorities)
        assert np.min(priorities) > 0
        assert np.min(idxes) >= 0
        assert np.max(idxes) < len(self.storage)
        self._it_sum[idxes] = priorities ** self._alpha
        self._it_min[idxes] = priorities ** self._alpha
        self._max_priority = max(self._max_priority, np.max(priorities))
class DiscrepancyReplayBuffer(ReplayBuffer):
    """Replay buffer that samples transitions with probability proportional to a
    min-max scaled discrepancy score produced by ``scorer``.
    """
    def __init__(self, size, scorer):
        """
        Create Discrepancy Replay buffer.

        See Also ReplayBuffer.__init__

        :param size: (int) Max number of transitions to store in the buffer. When the buffer overflows the old memories
            are dropped.
        :param scorer: (callable) maps a batch of observations to scores; used as
            (unnormalized) sampling priorities
        """
        super(DiscrepancyReplayBuffer, self).__init__(size)
        # Per-transition scores, parallel to the underlying storage.
        self.scores = []
        self.scorer = scorer
        # Running extrema used to min-max scale scores at sampling time.
        self.min_score = None
        self.max_score = None
    def add(self, obs_t, action, reward, obs_tp1, done):
        """
        add a new transition to the buffer, scoring it with self.scorer

        :param obs_t: (Any) the last observation
        :param action: ([float]) the action
        :param reward: (float) the reward of the transition
        :param obs_tp1: (Any) the current observation
        :param done: (bool) is the episode done
        """
        idx = self._next_idx
        score = self.scorer(np.expand_dims(obs_tp1, axis=0))[0][0]
        if self.min_score is None or score < self.min_score:
            self.min_score = score
        if self.max_score is None or score > self.max_score:
            self.max_score = score
        if self._next_idx >= len(self._storage):
            self.scores.append(score)
        else:
            self.scores[idx] = score
        super().add(obs_t, action, reward, obs_tp1, done)
        # NOTE(review): the scorer is invoked a second time here and the raw
        # batch output (not the scalar `score` above) is appended to the stored
        # transition, while update_priorities() assigns a scalar by item
        # assignment — these look inconsistent; verify which is intended.
        self.storage[-1] += (self.scorer(np.expand_dims(obs_tp1, axis=0)),)
    def sample(self, batch_size, **_kwargs):
        """
        Sample a batch of experiences, weighted by scaled discrepancy scores.

        Falls back to returning the whole buffer when there are fewer than
        batch_size transitions available.

        :param batch_size: (int) How many transitions to sample.
        :return:
            - obs_batch: (np.ndarray) batch of observations
            - act_batch: (numpy float) batch of actions executed given obs_batch
            - rew_batch: (numpy float) rewards received as results of executing act_batch
            - next_obs_batch: (np.ndarray) next set of observations seen after executing act_batch
            - done_mask: (numpy bool) done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode
                and 0 otherwise.
        """
        if not self.can_sample(batch_size):
            return self._encode_sample(list(range(len(self))))
        # Sample without replacement, proportionally to the scaled scores.
        scores = self._scale_scores(np.array(self.scores))
        idxs = np.random.choice(np.arange(len(scores)), size=(batch_size,), p=scores / np.sum(scores), replace=False)
        return self._encode_sample(idxs)
    def update_priorities(self):
        """
        Re-score every stored transition with the current scorer.

        NOTE(review): scores the stored *obs_t* (transition[0]) here, whereas
        add() scores obs_tp1; also assigns into a tuple element appended by
        add() — verify both before relying on this method.
        """
        scores = self.scorer([transition[0] for transition in self.storage])[:, 0]
        for i, transition in enumerate(self.storage):
            transition[-1] = scores[i]
    def _scale_scores(self, vals):
        # Min-max scale into [0.1, 1.0] so no transition gets zero probability.
        return (vals - self.min_score) / (self.max_score - self.min_score) * (1 - 0.1) + 0.1
class StableReplayBuffer(ReplayBuffer):
__name__ = "StableReplayBuffer"
    def __init__(self, size):
        """
        Create a replay buffer that keeps an externally supplied stability
        score per transition.

        See Also ReplayBuffer.__init__

        :param size: (int) Max number of transitions to store in the buffer. When the buffer overflows the old memories
            are dropped.
        """
        super(StableReplayBuffer, self).__init__(size)
        # Per-transition scores, parallel to the underlying storage.
        self.scores = []
        # Clipping bounds for the scores — presumably percentile-based and set
        # during sampling; None until initialised (verify against sample()).
        self.lower_clip = None
        self.upper_clip = None
    def add(self, obs_t, action, reward, obs_tp1, done, score=None):
        """
        add a new transition to the buffer

        :param obs_t: (Any) the last observation
        :param action: ([float]) the action
        :param reward: (float) the reward of the transition
        :param obs_tp1: (Any) the current observation
        :param done: (bool) is the episode done
        :param score: (Optional[float]) sampling-priority score stored alongside
            the transition
        """
        idx = self._next_idx
        # Keep self.scores aligned with the ring-buffer slot used by super().add.
        if self._next_idx >= len(self._storage):
            self.scores.append(score)
        else:
            self.scores[idx] = score
        super().add(obs_t, action, reward, obs_tp1, done)
def sample(self, batch_size, **_kwargs):
"""
Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
:param batch_size: (int) How many transitions to sample.
:param beta: (float) To what degree to use importance weights (0 - no corrections, 1 - full correction)
:return:
- obs_batch: (np.ndarray) batch of observations
- act_batch: (numpy float) batch of actions executed given obs_batch
- rew_batch: (numpy float) rewards received as results of executing act_batch
- next_obs_batch: (np.ndarray) next set of observations seen after executing act_batch
- done_mask: (numpy bool) done_mask[i] = 1 if executing act_batch[i] resulted in the end of an episode
and 0 otherwise.
- weights: (numpy float) Array of shape (batch_size,) and dtype np.float32 denoting importance weight of
each sampled transition
- idxes: (numpy int) Array of shape (batch_size,) and dtype np.int32 idexes in buffer of sampled experiences
"""
if not self.can_sample(batch_size):
return self._encode_sample(list(range(len(self))))
scores = np.array(self.scores)
scores = np.clip(scores, | np.percentile(scores, 10) | numpy.percentile |
## CPPN functions and classes
# Imports
import math
import numpy
from evolve_soft_2d import utility
################################################################################
class cppn:
"""The CPPN class object
"""
    def __init__(
        self,
        seed: int,
        mod_n: int,
        scale: float,
        hl_n: int,
        hl_s: int,
        thresh: float,
        x: int,
        y: int,
        ) -> None:
        """The CPPN parameters

        Parameters
        ----------
        seed : int
            The seed for the random generation
        mod_n : int
            The number of models to be generated from a particular seed
        scale : float
            The scale of the focus on the model
        hl_n : int
            The number of hidden layers
        hl_s : int
            The size of the initial hidden layer
        thresh : float
            The rounding/removal threshold
        x : int
            The number of elements in the x-direction
        y : int
            The number of elements in the y-direction
        """
        self.seed = seed
        self.mod_n = mod_n
        self.scale = scale
        self.hl_n = hl_n
        self.hl_s = hl_s
        # NOTE(review): per __repr__, thresh < 1 acts as a rounding threshold
        # while thresh >= 1 is a removal percentage — confirm callers respect this.
        self.thresh = thresh
        self.x = x
        self.y = y
        # The resolution of the grid
        self.res = self.x*self.y
        # Build the grid (runs the full CPPN forward pass at construction time)
        self.grid = self.cppn_grid()
def __repr__(self) -> str:
"""Format a representation of the CPPN
Returns
-------
str
Formatted representation of the CPPN for the log
"""
r = "Model Dimensions: {}x{} elements\n".format(self.x, self.y)
r += "Model Seed: {}\n".format(self.seed)
r += "Number Of Models Generated: {}\n".format(self.mod_n)
r += "Model Scale: 1:{}\n".format(self.scale)
r += "Number Of Hidden Layers: {}\n".format(self.hl_n)
r += "Size Of Initial Hidden Layer: {}\n".format(self.hl_s)
if self.thresh < 1:
r += "Rounding Threshold: {}\n".format(self.thresh)
else:
r += "Percentage Of Elements Removed: {}%\n".format(self.thresh)
r += "Activation Functions:\n"
for i in self.af:
r += "{}\n".format(i)
return r
    def cppn_grid(self) -> numpy.array:
        """Generates model grids

        Feeds x, y and radial coordinates plus a per-model latent vector
        through randomly-chosen activation layers and reshapes the final
        layer into (mod_n, x, y) grids.

        Returns
        -------
        numpy.array
            The model grid
        """
        # Initialisations
        self.af = []
        # The list of possible activation functions
        # NOTE(review): each activation helper apparently returns a
        # (activated values, label) pair — see the tuple unpacking below.
        af_l = [self.cppn_sin, self.cppn_cos, self.cppn_tanh, self.cppn_sigm, self.cppn_srel]
        af_o = [self.cppn_sigm, self.cppn_srel]
        # Set the random generation seed
        numpy.random.seed(seed = self.seed)
        # Generate the initial hidden layer for each model
        hl = numpy.random.uniform(low = -1, high = 1, size = (self.mod_n, self.hl_s)).astype(numpy.float32)
        # Generate the grid matrix
        x_r = numpy.linspace(-1*self.scale, self.scale, num = self.x)
        x_m = numpy.matmul(numpy.ones((self.y, 1)), x_r.reshape((1, self.x)))
        y_r = numpy.linspace(-1*self.scale, self.scale, num = self.y)
        y_m = numpy.matmul(y_r.reshape((self.y, 1)), numpy.ones((1, self.x)))
        # Radial distance from the grid centre
        r_m = numpy.sqrt(x_m*x_m + y_m*y_m)
        # Repeat each coordinate plane once per model
        x_d = numpy.tile(x_m.flatten(), self.mod_n).reshape(self.mod_n, self.res, 1)
        y_d = numpy.tile(y_m.flatten(), self.mod_n).reshape(self.mod_n, self.res, 1)
        r_d = numpy.tile(r_m.flatten(), self.mod_n).reshape(self.mod_n, self.res, 1)
        # Scale the initial hidden layers
        hl_scale = numpy.reshape(hl, (self.mod_n, 1, self.hl_s))*numpy.ones((self.res, 1), dtype = numpy.float32)*self.scale
        # Unwrap the grid matrices
        x_d_unwrap = numpy.reshape(x_d, (self.mod_n*self.res, 1))
        y_d_unwrap = numpy.reshape(y_d, (self.mod_n*self.res, 1))
        r_d_unwrap = numpy.reshape(r_d, (self.mod_n*self.res, 1))
        hl_unwrap = numpy.reshape(hl_scale, (self.mod_n*self.res, self.hl_s))
        # Build the network
        n = self.fully_connected(hl_unwrap, self.hl_n, True, self.seed) + self.fully_connected(x_d_unwrap, self.hl_n, False, self.seed + 1) + self.fully_connected(y_d_unwrap, self.hl_n, False, self.seed + 2) + self.fully_connected(r_d_unwrap, self.hl_n, False, self.seed + 3)
        # Transpose the network
        # NOTE(review): rows of the transposed matrix are then treated as
        # successive "layers" n[i] — confirm this is the intended topology.
        n = n.T
        if self.hl_n > 1:
            # Loop through the second to second-last hidden layers
            for i in range(1, self.hl_n - 1):
                # Set the seed for each layer
                numpy.random.seed(seed = self.seed + i)
                # Select and record the activation function
                n[i], af_c = numpy.random.choice(af_l)(n[i - 1])
                self.af.append(af_c)
            # Set the seed for the final layer
            numpy.random.seed(seed = self.seed)
            # Apply and record the final function
            # NOTE(review): af_o (the list of output functions) is overwritten
            # here with the chosen function's label; harmless afterwards, but
            # confusing.
            n[-1], af_o = numpy.random.choice(af_o)(n[-2])
            self.af.append(af_o)
        else:
            # Set the seed for each layer
            numpy.random.seed(seed = self.seed)
            # Select and record the activation function
            n[0], af_c = numpy.random.choice(af_l)(n[0])
            self.af.append(af_c)
            # Apply and record the final function
            n[0], af_o = numpy.random.choice(af_o)(n[0])
            self.af.append(af_o)
        # Reshape the grid to fit the given dimensions
        mod = numpy.reshape(n[-1], (self.mod_n, self.x, self.y))
        return mod
def fully_connected(
self,
i_v: numpy.array,
o_d,
w_bias: bool,
seed: int,
) -> numpy.array:
"""Connect all layers of the CPPN
Parameters
----------
i_v : numpy.array
The input vector
o_d
The output dimensions
seed : int
The random generation
w_bias : bool
If the layers should be connected with bias
Returns
-------
numpy.array
The connected results
"""
# Set the random generation seed
numpy.random.seed(seed = seed)
# Generate the random matrix
m = numpy.random.standard_normal(size = (i_v.shape[1], o_d)).astype(numpy.float32)
# Multiply the input with the matrix
result = | numpy.matmul(i_v, m) | numpy.matmul |
# Copyright (c) 2020-2021 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel, and University of Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import numpy as np
import pandapipes.networks.simple_water_networks as nw
import pytest
from pandapipes.pipeflow import logger as pf_logger
from pandapipes.test.stanet_comparison.pipeflow_stanet_comparison import pipeflow_stanet_comparison
# Use the project logger if available, otherwise fall back to stdlib logging.
try:
    import pandaplan.core.pplog as logging
except ImportError:
    import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Keep the pipeflow solver quiet during the comparison runs.
pf_logger.setLevel(logging.WARNING)
# ---------- TEST AREA: combined networks ----------
# district_N
def test_case_district_grid_n(log_results=False):
    """District grid water network, Nikuradse friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_district_grid(method="n")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results)
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# district_PC
def test_case_district_grid_pc(log_results=False):
    """District grid water network, Prandtl-Colebrook friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_district_grid(method="pc")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results, friction_model="colebrook")
    assert np.all(p_deviation < 0.03)
    assert np.all(v_deviation < 0.03)
# ---------- TEST AREA: meshed networks ----------
# pumps_N
def test_case_pumps_n(log_results=False):
    """Meshed network with pumps, Nikuradse friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_meshed_pumps(results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results)
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# delta_N
def test_case_delta_n(log_results=False):
    """Meshed delta network, Nikuradse friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_meshed_delta(results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results)
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# two_valves_N
def test_case_meshed_2valves_n(log_results=False):
    """Meshed network with two valves, Nikuradse friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_meshed_2valves(method="n", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results)
    assert np.all(p_deviation < 0.001)
    assert np.all(v_deviation < 0.001)
# two_valves_PC
def test_case_meshed_2valves_pc(log_results=False):
    """Meshed network with two valves, Prandtl-Colebrook friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_meshed_2valves(method="pc", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results, friction_model="colebrook")
    assert np.all(p_deviation < 0.001)
    assert np.all(v_deviation < 0.001)
# ---------- TEST AREA: one pipe ----------
# pipe_1_N
def test_case_one_pipe1_n(log_results=False):
    """Single-pipe network (variant 1), Nikuradse friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_one_pipe1(method="n", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results)
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# pipe_1_PC
def test_case_one_pipe1_pc(log_results=False):
    """Single-pipe network (variant 1), Prandtl-Colebrook friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_one_pipe1(method="pc", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results, friction_model="colebrook")
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# pipe_2_N
def test_case_one_pipe2_n(log_results=False):
    """Single-pipe network (variant 2), Nikuradse friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_one_pipe2(method="n", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results)
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# pipe_2_PC
def test_case_one_pipe2_pc(log_results=False):
    """Single-pipe network (variant 2), Prandtl-Colebrook friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_one_pipe2(method="pc", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results, friction_model="colebrook")
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# pipe_3_N
def test_case_one_pipe3_n(log_results=False):
    """Single-pipe network (variant 3), Nikuradse friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_one_pipe3(method="n", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results)
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# pipe_3_PC
def test_case_one_pipe3_pc(log_results=False):
    """Single-pipe network (variant 3), Prandtl-Colebrook friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_one_pipe3(method="pc", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results, friction_model="colebrook")
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# ---------- TEST AREA: strand net ----------
# strand_net_N
def test_case_simple_strand_net_n(log_results=False):
    """Simple strand network, Nikuradse friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_simple_strand_net(method="n", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results)
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# strand_net_PC
def test_case_simple_strand_net_pc(log_results=False):
    """Simple strand network, Prandtl-Colebrook friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_simple_strand_net(method="pc", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results, friction_model="colebrook")
    assert np.all(p_deviation < 0.01)
    assert np.all(v_deviation < 0.03)
# two_pipes_N
def test_case_two_pipes_n(log_results=False):
    """Two-pipe strand network, Nikuradse friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_strand_2pipes(method="n", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results)
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# two_pipes_PC
def test_case_two_pipes_pc(log_results=False):
    """Two-pipe strand network, Prandtl-Colebrook friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_strand_2pipes(method="pc", results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results, friction_model="colebrook")
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# cross_PC
def test_case_cross_pc(log_results=False):
    """Strand network with cross, Prandtl-Colebrook friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_strand_cross(results_from="stanet")
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results, friction_model="colebrook")
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# pump_N
def test_case_pump_n(log_results=False):
    """Strand network with a pump, Nikuradse friction, compared to STANET.

    :param log_results: whether to log the comparison results
    :type log_results: bool
    """
    network = nw.water_strand_pump()
    p_deviation, v_deviation = pipeflow_stanet_comparison(network, log_results)
    assert np.all(p_deviation < 0.002)
    assert np.all(v_deviation < 0.03)
# ---------- TEST AREA: t_cross ----------
# t-cross_N
def test_case_tcross_n(log_results=False):
    """T-cross network vs. the STANET reference, default friction settings.

    :param log_results: forwarded to the comparison helper to enable logging
    :type log_results: bool
    """
    net = nw.water_tcross(method="n", results_from="stanet")
    p_diff, v_diff_abs = pipeflow_stanet_comparison(net, log_results)
    assert np.all(p_diff < 0.002)
    # Repaired: this line was corrupted in the source; the tolerance matches
    # the velocity tolerance used by every sibling test in this module.
    assert np.all(v_diff_abs < 0.03)
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.model_selection import KFold
def personal_normalisation(data):
    """Z-score every feature column per subject; the last column is left untouched.

    The input frame must contain a ``subject`` column; scaling is fit
    independently on each subject's rows.
    """
    indexed = data.set_index('subject')
    for subject in indexed.index.unique():
        block = indexed.loc[subject].iloc[:, :-1]
        feature_cols = block.columns
        scaled = StandardScaler().fit_transform(block)
        indexed.loc[subject, feature_cols] = scaled
    return indexed.reset_index()
def cv_leave_three_out(data, seed=123):
    """Build leave-three-subjects-out cross-validation index lists.

    After shuffling the unique subjects with ``seed``, each fold uses subjects
    i, i+1, i+2 (cyclically) as the test set and all remaining subjects as the
    training set.

    :param data: DataFrame with a ``subject`` column
    :param seed: seed for the subject shuffle
    :return: (train_indices, test_indices) lists of index arrays, one per fold
    """
    train_indices = list()
    test_indices = list()
    subjects = data.subject.unique()
    np.random.seed(seed)
    np.random.shuffle(subjects)
    for i, subject in enumerate(subjects):
        testsubjects = [subjects[i], subjects[(i + 1) % len(subjects)], subjects[(i + 2) % len(subjects)]]
        trainsubjects = np.delete(subjects, [i, (i + 1) % len(subjects), (i + 2) % len(subjects)])
        test_i = list()
        for tests in testsubjects:
            # Repaired: this line was corrupted in the source.
            test_i.append(np.array(data[data.subject == tests].index))
        # NOTE(review): everything from here on was lost/truncated in the
        # source and has been reconstructed from the evident intent of the
        # initialized accumulators — verify against the original.
        test_indices.append(np.concatenate(test_i))
        train_i = [np.array(data[data.subject == s].index) for s in trainsubjects]
        train_indices.append(np.concatenate(train_i))
    return train_indices, test_indices
import pandas as pd
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage.transform import resize
import itertools
from sklearn.metrics import confusion_matrix,roc_auc_score, roc_curve, auc, precision_recall_curve, average_precision_score, f1_score
import seaborn as sns
import scipy
from scipy import stats
from sklearn.utils import resample
def zero_pad(img, size=448):
    """Zero-pad an H×W×C image to a square, then resize it to (size, size).

    Padding is added on the right (columns) when the image is taller than
    wide, and at the bottom (rows) when it is wider than tall.
    """
    height, width, channels = img.shape
    if height == width:
        square = img
    elif height > width:
        # Pad columns on the right until the image is square.
        filler = np.zeros([height, height - width, channels]).astype(np.uint8)
        square = np.hstack((img, filler))
    else:
        # Pad rows at the bottom until the image is square.
        filler = np.zeros([width - height, width, channels]).astype(np.uint8)
        square = np.vstack((img, filler))
    resized = (255 * resize(square, (size, size), anti_aliasing=True)).astype(np.uint8)
    return resized
def get_precision_recall(ax, y_true, y_pred, title, boostrap=5, plot=True):
    """Draw a precision-recall curve with a bootstrap CI on average precision.

    :param ax: matplotlib axes to draw on
    :param y_true: binary ground-truth labels
    :param y_pred: predicted scores/probabilities for the positive class
    :param title: axes title
    :param boostrap: number of bootstrap resamples for the CI (name kept
        as-is, typo and all, for interface compatibility)
    :param plot: if False, skip drawing and only compute thresholds
    :return: thresholds array from sklearn's precision_recall_curve
    """
    def delta_confidence_interval(data, confidence=0.95):
        # Half-width of a Student-t confidence interval for the mean of `data`.
        a = 1.0 * np.array(data)
        n = len(a)
        m, se = np.mean(a), scipy.stats.sem(a)  # NOTE(review): m is unused
        h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
        return h
    # Bootstrap the average precision by jointly resampling (pred, label) pairs.
    # NOTE(review): resample is unseeded, so the CI is not reproducible.
    ap_score = []
    for i in range(boostrap):
        pred_bt, y_bt = resample(y_pred, y_true)
        ap_score.append(average_precision_score(y_bt, pred_bt))
    AP = average_precision_score(y_true, y_pred)
    precision, recall, thresholds = precision_recall_curve(y_true, y_pred)
    if plot:
        delta = delta_confidence_interval(ap_score)
        sns.set_style('ticks')
        # plt.figure()
        ax.plot(recall, precision, color='red', lw=2,
                label='AUC = {:.3f}, \n95% C.I. = [{:.3f}, {:.3f}]'.format(AP, AP - delta, AP + delta), alpha=.8)
        ax.set_xlabel('Recall', fontsize=16, fontweight='bold')
        ax.set_ylabel('Precision', fontsize=16, fontweight='bold')
        ax.xaxis.set_tick_params(labelsize=16)
        ax.yaxis.set_tick_params(labelsize=16)
        ax.set_ylim(0, 1)
        ax.set_xlim(0, 1)
        ax.set_title(title, fontsize=16, fontweight='bold')
        ax.legend(fontsize=12, loc='lower right')
        ax.grid()
    return thresholds
def get_auc(ax, y_true, y_score, title, plot=True):
    """Draw a ROC curve with a DeLong confidence interval on the AUC.

    :param ax: matplotlib axes to draw on
    :param y_true: binary ground-truth labels
    :param y_score: predicted scores for the positive class
    :param title: axes title
    :param plot: if False, skip drawing and only compute the threshold
    :return: score threshold maximizing (TPR - FPR), i.e. Youden's J statistic
    """
    fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_true, y_score)
    auc_keras = auc(fpr_keras, tpr_keras)
    # Operating point that maximizes TPR - FPR (Youden's J).
    optimal_idx = np.argmax(tpr_keras - fpr_keras)
    optimal_threshold = thresholds_keras[optimal_idx]
    if plot:
        ci = get_CI(y_true, y_score)
        sns.set_style('ticks')
        # plt.figure()
        ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='orange', label='Chance', alpha=.8)
        ax.plot(fpr_keras, tpr_keras, color='red', lw=2,
                label='AUC = {:.3f}, \n95% C.I. = [{:.3f}, {:.3f}]'.format(auc_keras, ci[0], ci[1]), alpha=.8)
        # NOTE(review): the x data is FPR (1 - specificity) even though the
        # axis is labeled 'Specificity' — confirm the intended labeling.
        ax.set_xlabel('Specificity', fontsize=16, fontweight='bold')
        ax.set_ylabel('Sensitivity', fontsize=16, fontweight='bold')
        ax.xaxis.set_tick_params(labelsize=16)
        ax.yaxis.set_tick_params(labelsize=16)
        ax.set_ylim(0, 1)
        ax.set_xlim(0, 1)
        ax.set_title(title, fontsize=16, fontweight='bold')
        ax.legend(fontsize=12, loc='lower right')
        ax.grid()
    return optimal_threshold
def get_CI(y_true, y_score, alpha=0.95):
    """Return a DeLong-based normal-approximation confidence interval for the ROC AUC."""
    estimate, variance = delong_roc_variance(y_true, y_score)
    std = np.sqrt(variance)
    tail = (1 - alpha) / 2
    quantiles = np.abs(np.array([0, 1]) - tail)
    ci = stats.norm.ppf(quantiles, loc=estimate, scale=std)
    ci[ci > 1] = 1  # an AUC cannot exceed 1
    print('AUC:', estimate)
    print('AUC COV:', variance)
    print('95% AUC CI:', ci)
    return ci
def delong_roc_variance(ground_truth, predictions, sample_weight=None):
    """Compute the ROC AUC and its DeLong variance for a single classifier.

    Args:
        ground_truth: np.array of 0 and 1
        predictions: np.array of floats of the probability of being class 1
    """
    sort_order, n_positives, sorted_weights = compute_ground_truth_statistics(
        ground_truth, sample_weight)
    sorted_predictions = predictions[np.newaxis, sort_order]
    aucs, cov = fastDeLong(sorted_predictions, n_positives, sorted_weights)
    assert len(aucs) == 1, "There is a bug in the code, please forward this to the developers"
    return aucs[0], cov
def compute_ground_truth_statistics(ground_truth, sample_weight):
    """Sort samples so positives come first; return the order, positive count and reordered weights."""
    assert np.array_equal(np.unique(ground_truth), [0, 1])
    # Descending sort on the labels places every 1 before every 0.
    order = (-ground_truth).argsort()
    positives = int(ground_truth.sum())
    ordered_weights = None if sample_weight is None else sample_weight[order]
    return order, positives, ordered_weights
def fastDeLong(predictions_sorted_transposed, label_1_count, sample_weight):
    """Dispatch to the weighted or unweighted fast DeLong implementation."""
    if sample_weight is not None:
        return fastDeLong_weights(predictions_sorted_transposed, label_1_count, sample_weight)
    return fastDeLong_no_weights(predictions_sorted_transposed, label_1_count)
def fastDeLong_weights(predictions_sorted_transposed, label_1_count, sample_weight):
    """
    The fast version of DeLong's method for computing the covariance of
    unadjusted AUC (sample-weighted variant).
    Args:
       predictions_sorted_transposed: a 2D numpy.array[n_classifiers, n_examples]
          sorted such as the examples with label "1" are first
    Returns:
       (AUC value, DeLong covariance)
    Reference:
     @article{sun2014fast,
       title={Fast Implementation of DeLong's Algorithm for
              Comparing the Areas Under Correlated Receiver Oerating Characteristic Curves},
       author={Sun, Xu and Xu, Weichao},
       journal={IEEE Signal Processing Letters},
       volume={21},
       number={11},
       pages={1389--1393},
       year={2014},
       publisher={IEEE}
     }
    """
    # Short variables are named as they are in the paper
    m = label_1_count
    n = predictions_sorted_transposed.shape[1] - m
    positive_examples = predictions_sorted_transposed[:, :m]
    negative_examples = predictions_sorted_transposed[:, m:]
    k = predictions_sorted_transposed.shape[0]
    # Fix: dtype=np.float — the np.float alias was deprecated in NumPy 1.20
    # and removed in NumPy 1.24; use the builtin float instead.
    tx = np.empty([k, m], dtype=float)
    ty = np.empty([k, n], dtype=float)
    tz = np.empty([k, m + n], dtype=float)
    for r in range(k):
        tx[r, :] = compute_midrank_weight(positive_examples[r, :], sample_weight[:m])
        ty[r, :] = compute_midrank_weight(negative_examples[r, :], sample_weight[m:])
        tz[r, :] = compute_midrank_weight(predictions_sorted_transposed[r, :], sample_weight)
    total_positive_weights = sample_weight[:m].sum()
    total_negative_weights = sample_weight[m:].sum()
    pair_weights = np.dot(sample_weight[:m, np.newaxis], sample_weight[np.newaxis, m:])
    total_pair_weights = pair_weights.sum()
    aucs = (sample_weight[:m] * (tz[:, :m] - tx)).sum(axis=1) / total_pair_weights
    v01 = (tz[:, :m] - tx[:, :]) / total_negative_weights
    v10 = 1. - (tz[:, m:] - ty[:, :]) / total_positive_weights
    # DeLong covariance from the structural components of the two classes.
    sx = np.cov(v01)
    sy = np.cov(v10)
    delongcov = sx / m + sy / n
    return aucs, delongcov
def fastDeLong_no_weights(predictions_sorted_transposed, label_1_count):
    """
    The fast version of DeLong's method for computing the covariance of
    unadjusted AUC.
    Args:
       predictions_sorted_transposed: a 2D numpy.array[n_classifiers, n_examples]
          sorted such as the examples with label "1" are first
    Returns:
       (AUC value, DeLong covariance)
    Reference:
     @article{sun2014fast,
       title={Fast Implementation of DeLong's Algorithm for
              Comparing the Areas Under Correlated Receiver Oerating
              Characteristic Curves},
       author={Sun, Xu and Xu, Weichao},
       journal={IEEE Signal Processing Letters},
       volume={21},
       number={11},
       pages={1389--1393},
       year={2014},
       publisher={IEEE}
     }
    """
    # Short variables are named as they are in the paper
    m = label_1_count
    n = predictions_sorted_transposed.shape[1] - m
    positive_examples = predictions_sorted_transposed[:, :m]
    negative_examples = predictions_sorted_transposed[:, m:]
    k = predictions_sorted_transposed.shape[0]
    # Fix: dtype=np.float — the np.float alias was deprecated in NumPy 1.20
    # and removed in NumPy 1.24; use the builtin float instead.
    tx = np.empty([k, m], dtype=float)
    ty = np.empty([k, n], dtype=float)
    tz = np.empty([k, m + n], dtype=float)
    for r in range(k):
        tx[r, :] = compute_midrank(positive_examples[r, :])
        ty[r, :] = compute_midrank(negative_examples[r, :])
        tz[r, :] = compute_midrank(predictions_sorted_transposed[r, :])
    aucs = tz[:, :m].sum(axis=1) / m / n - float(m + 1.0) / 2.0 / n
    v01 = (tz[:, :m] - tx[:, :]) / n
    v10 = 1.0 - (tz[:, m:] - ty[:, :]) / m
    # DeLong covariance from the structural components of the two classes.
    sx = np.cov(v01)
    sy = np.cov(v10)
    delongcov = sx / m + sy / n
    return aucs, delongcov
def calc_pvalue(aucs, sigma):
    """Computes log(10) of p-values for the DeLong test of two correlated AUCs.

    Args:
       aucs: 1D array of AUCs
       sigma: AUC DeLong covariances
    Returns:
       log10(pvalue)
    """
    l = np.array([[1, -1]])
    # Repaired: this function was corrupted/truncated in the source; the body
    # below is the standard completion of the fast-DeLong z-test:
    # z = |AUC1 - AUC2| / sqrt(contrast variance), two-sided normal p-value.
    z = np.abs(np.diff(aucs)) / np.sqrt(np.dot(np.dot(l, sigma), l.T))
    return np.log10(2) + scipy.stats.norm.logsf(z, loc=0, scale=1) / np.log(10)
"""
Adapted from the original NMpathAnalysis package,
https://github.com/ZuckermanLab/NMpathAnalysis
"""
import numpy as np
from msm_we.fpt import DirectFPT, MarkovFPT, NonMarkovFPT
from msm_we.ensembles import DiscreteEnsemble, DiscretePathEnsemble
from msm_we.utils import map_to_integers, normalize_markov_matrix
from msm_we.utils import pops_from_nm_tmatrix, pops_from_tmatrix
from msm_we.utils import pseudo_nm_tmatrix, weighted_choice
class NonMarkovModel(DiscreteEnsemble):
    """Define a class for analyzing MD trajectories using Markovian or non-Markovian Model
    from a list of 1D trajectories of integers representing macrostates
    For example:
    trajectories = [ [1 , 2, 0, ...], [2, 2, 1, ...], [3, 1, 2, ...], ...]
    If only one sequence is given in trajectories, the format is the same:
    trajectories = [ [1 , 2, 0, ...] ]
    Parameters
    ----------
    lag_time (integer, default: 1)
        Lag time of the model.
    sliding_window (boolean)
        Use a sliding window of length lag_time to compute the count matrix
    stateA, stateB (python lists)
        Define the initial and final macrostates in form of python lists
        for example: stateA=[0,2,5], stateB = [1]
    Attributes
    ----------
    n_states : int
    nm_cmatrix: array, with shape (2 n_states, 2 n_states)
        Stores the number of transitions between states, the i,j element cij
        stores the number of transitions observed from i to j.
    populations: array, shape (n_states,)
        Equilibrium population, the steady state solution of the
        transition matrix
    """

    def __init__(
        self,
        trajectories,
        stateA,
        stateB,
        lag_time=1,
        clean_traj=False,
        sliding_window=True,
        reversible=True,
        markovian=False,
        coarse_macrostates=False,
        **kwargs
    ):
        """Initialize an object for Non Markovian Model Class"""
        if coarse_macrostates:
            # Collapse every microstate belonging to A (or B) onto a single
            # representative label so each macrostate becomes one integer.
            for traj in trajectories:
                for i, _ in enumerate(traj):
                    if traj[i] in stateA:
                        traj[i] = stateA[0]
                    elif traj[i] in stateB:
                        traj[i] = stateB[0]
            stateA = [stateA[0]]
            stateB = [stateB[0]]
        self._lag_time = lag_time
        self.trajectories = trajectories
        self.stateA = stateA
        self.stateB = stateB
        self.sliding_window = sliding_window
        self.reversible = reversible
        self.markovian = markovian
        self.n_variables = 1  # by construction
        self.discrete = True  # by construction
        # Bug fix: the original check compared int(lag) != int(lag), which is
        # always False, so non-integer lag times slipped through silently.
        if (self._lag_time < 1) or (int(self._lag_time) != self._lag_time):
            raise ValueError("The lag time should be an integer greater than 1")
        if clean_traj:
            # NOTE(review): this branch never builds self.seq_map, so it
            # assumes trajectories already use contiguous integer labels.
            self.n_states = max([max(traj) for traj in self.trajectories]) + 1
        else:
            self._map_trajectories_to_integers()
        self.fit()

    def _map_trajectories_to_integers(self):
        """Relabel raw macrostate ids as contiguous integers starting at 0."""
        # Clean the sequences
        seq_map = {}
        new_trajs = []
        for seq in self.trajectories:
            newseq, m_dict = map_to_integers(seq, seq_map)
            new_trajs.append(newseq)
        self.stateA = [seq_map[i] for i in self.stateA]
        self.stateB = [seq_map[i] for i in self.stateB]
        self.n_states = len(seq_map)
        self.trajectories = new_trajs
        self.seq_map = seq_map

    def fit(self):
        """Fits the non-Markovian model from a list of sequences"""
        # Non-Markovian count matrix over "labeled" states: index 2*i is
        # state i with color "A" (last macrostate visited was A) and 2*i + 1
        # is state i with color "B".
        nm_cmatrix = np.zeros((2 * self.n_states, 2 * self.n_states))
        # Markovian count matrix
        markov_cmatrix = np.zeros((self.n_states, self.n_states))
        lag = self._lag_time
        if not self.sliding_window:
            step = lag
        else:
            step = 1
        for traj in self.trajectories:
            # With a sliding window the two nested loops enumerate every frame
            # index >= lag exactly once, pairing it with the frame `lag`
            # steps earlier.
            for start in range(lag, 2 * lag, step):
                prev_color = None
                for i in range(start, len(traj), lag):
                    # Color determination
                    if traj[i] in self.stateA:
                        color = "A"
                    elif traj[i] in self.stateB:
                        color = "B"
                    else:
                        # Intermediate states inherit the last color seen.
                        color = prev_color
                    # Count matrix for the given lag time
                    if prev_color == "A" and color == "B":
                        nm_cmatrix[2 * traj[i - lag], 2 * traj[i] + 1] += 1.0
                    elif prev_color == "B" and color == "A":
                        nm_cmatrix[2 * traj[i - lag] + 1, 2 * traj[i]] += 1.0
                    elif prev_color == "A" and color == "A":
                        nm_cmatrix[2 * traj[i - lag], 2 * traj[i]] += 1.0
                    elif prev_color == "B" and color == "B":
                        nm_cmatrix[2 * traj[i - lag] + 1, 2 * traj[i] + 1] += 1.0
                    prev_color = color
                    markov_cmatrix[traj[i - lag], traj[i]] += 1.0
        nm_tmatrix = normalize_markov_matrix(nm_cmatrix)
        # NOTE(review): reversible=True is hard-coded here even though
        # self.reversible is stored in __init__ -- confirm intended.
        markov_tmatrix = normalize_markov_matrix(markov_cmatrix, reversible=True)
        self.nm_tmatrix = nm_tmatrix
        self.nm_cmatrix = nm_cmatrix
        self.markov_cmatrix = markov_cmatrix
        self.markov_tmatrix = markov_tmatrix

    @classmethod
    def from_nm_tmatrix(cls, transition_matrix, stateA, stateB, sim_length=None, initial_state=0):
        """Generates a discrete ensemble from the transition matrix"""
        if sim_length is None:
            raise Exception("The simulation length must be given")
        if not isinstance(transition_matrix, np.ndarray):
            transition_matrix = np.array(transition_matrix)
        n_states = len(transition_matrix)
        assert n_states == len(transition_matrix[0])
        current_state = initial_state
        # Labeled (colored) states are folded back to plain states via // 2.
        discrete_traj = [initial_state // 2]
        for i in range(sim_length):
            next_state = weighted_choice([k for k in range(n_states)], transition_matrix[current_state, :])
            discrete_traj.append(next_state // 2)
            current_state = next_state
        return cls([np.array(discrete_traj)], stateA, stateB, clean_traj=True)

    @property
    def lag_time(self):
        """Current lag time of the model."""
        return self._lag_time

    @lag_time.setter
    def lag_time(self, lag_time):
        # Changing the lag time invalidates the count matrices, so refit.
        self._lag_time = lag_time
        self.fit()

    def mfpts(self):
        """Mean first-passage times A<->B from the fitted transition matrix."""
        if self.markovian:
            return MarkovFPT.mean_fpts(self.markov_tmatrix, self.stateA, self.stateB, lag_time=self._lag_time)
        else:
            return NonMarkovFPT.mean_fpts(self.nm_tmatrix, self.stateA, self.stateB, lag_time=self._lag_time)

    def empirical_mfpts(self):
        """Mean first-passage times measured directly on the trajectories."""
        return DirectFPT.mean_fpts(self.trajectories, self.stateA, self.stateB, lag_time=self._lag_time)

    def empirical_fpts(self):
        """Individual first-passage times measured directly on the trajectories."""
        return DirectFPT.fpts(self.trajectories, self.stateA, self.stateB, lag_time=self._lag_time)

    def populations(self):
        """Equilibrium populations of the (unlabeled) states."""
        # In this case the results are going to be the same
        if self.markovian:
            return pops_from_tmatrix(self.markov_tmatrix)
        else:
            return pops_from_nm_tmatrix(self.nm_tmatrix)

    @property
    def popA(self):
        """Total equilibrium population of macrostate A."""
        pop_A = 0
        pops = self.populations()
        for i, p in enumerate(pops):
            if i in self.stateA:
                pop_A += p
        return pop_A

    @property
    def popB(self):
        """Total equilibrium population of macrostate B."""
        pop_B = 0
        pops = self.populations()
        for i, p in enumerate(pops):
            if i in self.stateB:
                pop_B += p
        return pop_B

    def tmatrixAB(self):
        """Transition matrix of the A->B reactive dynamics (B made absorbing)."""
        if self.markovian:
            return self.markov_tmatrix
        matrixAB = []
        # Even rows/columns (2*i) of nm_tmatrix carry the "A"-colored states.
        for i in range(0, 2 * self.n_states, 2):
            for j in range(0, 2 * self.n_states, 2):
                if (i // 2 in self.stateB) and not (j // 2 in self.stateB):
                    matrixAB.append(0.0)
                elif (i // 2 in self.stateB) and (j // 2 in self.stateB):
                    if i // 2 == j // 2:
                        matrixAB.append(1.0)
                    else:
                        matrixAB.append(0.0)
                elif not (i // 2 in self.stateB) and (j // 2 in self.stateB):
                    matrixAB.append(self.nm_tmatrix[i, j + 1])
                else:
                    matrixAB.append(self.nm_tmatrix[i, j])
        matrixAB = np.array(matrixAB)
        matrixAB = matrixAB.reshape((self.n_states, self.n_states))
        return matrixAB

    def tmatrixBA(self):
        """Transition matrix of the B->A reactive dynamics (A made absorbing)."""
        if self.markovian:
            return self.markov_tmatrix
        matrixBA = []
        # Odd rows/columns (2*i + 1) of nm_tmatrix carry the "B"-colored states.
        for i in range(1, 2 * self.n_states + 1, 2):
            for j in range(1, 2 * self.n_states + 1, 2):
                if (i // 2 in self.stateA) and not (j // 2 in self.stateA):
                    matrixBA.append(0.0)
                elif (i // 2 in self.stateA) and (j // 2 in self.stateA):
                    if i // 2 == j // 2:
                        matrixBA.append(1.0)
                    else:
                        matrixBA.append(0.0)
                elif not (i // 2 in self.stateA) and (j // 2 in self.stateA):
                    matrixBA.append(self.nm_tmatrix[i, j - 1])
                else:
                    matrixBA.append(self.nm_tmatrix[i, j])
        matrixBA = np.array(matrixBA)
        matrixBA = matrixBA.reshape((self.n_states, self.n_states))
        return matrixBA

    def fluxAB_distribution_on_B(self):
        """Distribution of the A->B reactive flux over the states of B."""
        if self.markovian:
            t_matrix = pseudo_nm_tmatrix(self.markov_tmatrix, self.stateA, self.stateB)
        else:
            t_matrix = self.nm_tmatrix
        distrib_on_B = np.zeros(len(self.stateB))
        labeled_pops = pops_from_tmatrix(t_matrix)
        for i in range(0, 2 * self.n_states, 2):
            for j in range(2 * self.n_states):
                if j // 2 in self.stateB:
                    distrib_on_B[self.stateB.index(j // 2)] += labeled_pops[i] * t_matrix[i, j]
        return distrib_on_B

    def fluxBA_distribution_on_A(self):
        """Distribution of the B->A reactive flux over the states of A."""
        if self.markovian:
            t_matrix = pseudo_nm_tmatrix(self.markov_tmatrix, self.stateA, self.stateB)
        else:
            t_matrix = self.nm_tmatrix
        distrib_on_A = np.zeros(len(self.stateA))
        labeled_pops = pops_from_tmatrix(t_matrix)
        for i in range(1, 2 * self.n_states + 1, 2):
            for j in range(2 * self.n_states):
                if j // 2 in self.stateA:
                    distrib_on_A[self.stateA.index(j // 2)] += labeled_pops[i] * t_matrix[i, j]
        return distrib_on_A

    def fpt_distrib_AB(self, max_x=1000, dt=1):
        """First-passage-time distribution for A->B transitions."""
        return MarkovFPT.fpt_distribution(
            self.tmatrixAB(),
            self.stateA,
            self.stateB,
            self.fluxBA_distribution_on_A(),
            max_n_lags=max_x,
            lag_time=self._lag_time,
            dt=dt,
        )

    def fpt_distrib_BA(self, max_x=1000, dt=1):
        """First-passage-time distribution for B->A transitions."""
        return MarkovFPT.fpt_distribution(
            self.tmatrixBA(),
            self.stateB,
            self.stateA,
            self.fluxAB_distribution_on_B(),
            max_n_lags=max_x,
            lag_time=self._lag_time,
            dt=dt,
        )

    def corr_function(self, times):
        """Compute the correlation function for a set of times.
        Parameters
        ----------
        times (list of integers):
            List of dt values used to compute the correlation function.
        Returns
        -------
        List of floats with the correlation values for the dt given in times
        """
        pAA = []
        pAB = []
        pBA = []
        pBB = []
        t_matrix = self.markov_tmatrix if self.markovian else self.nm_tmatrix
        tot_n_states = self.n_states if self.markovian else (2 * self.n_states)
        for dt in times:
            if dt % self.lag_time != 0:
                raise ValueError("The times given should be " "multiple of the lag time")
            n = int(dt / self.lag_time)
            pops_eq = self.populations()
            # Propagate the equilibrium population restricted to A (resp. B)
            # forward n lag steps and measure how much mass ends in A / B.
            t_matrixT_to_n = np.linalg.matrix_power(t_matrix.T, n)
            popsA_to_propagate = np.zeros(tot_n_states)
            popsB_to_propagate = np.zeros(tot_n_states)
            if self.markovian:
                for index in self.stateA:
                    popsA_to_propagate[index] = pops_eq[index]
                for index in self.stateB:
                    popsB_to_propagate[index] = pops_eq[index]
                final_dist_from_A = np.dot(t_matrixT_to_n, popsA_to_propagate)
                final_dist_from_B = np.dot(t_matrixT_to_n, popsB_to_propagate)
                pAA.append(sum([final_dist_from_A[i] for i in self.stateA]))
                pBB.append(sum([final_dist_from_B[i] for i in self.stateB]))
                pAB.append(sum([final_dist_from_B[i] for i in self.stateA]))
                pBA.append(sum([final_dist_from_A[i] for i in self.stateB]))
            else:
                for index in self.stateA:
                    popsA_to_propagate[2 * index] = pops_eq[index]
                for index in self.stateB:
                    popsB_to_propagate[2 * index + 1] = pops_eq[index]
                final_dist_from_A = np.dot(t_matrixT_to_n, popsA_to_propagate)
                final_dist_from_B = np.dot(t_matrixT_to_n, popsB_to_propagate)
                pAA.append(sum([final_dist_from_A[2 * i] for i in self.stateA]))
                pBB.append(sum([final_dist_from_B[2 * i + 1] for i in self.stateB]))
                pAB.append(sum([final_dist_from_B[2 * i] for i in self.stateA]))
                pBA.append(sum([final_dist_from_A[2 * i + 1] for i in self.stateB]))
        return pAA, pAB, pBA, pBB

    def empirical_weighted_FS(self, tmatrix_for_classification=None, symmetric=True):
        """Weighted fundamental sequences from the observed path ensemble."""
        if tmatrix_for_classification is None:
            tmatrix_for_classification = self.markov_tmatrix
        ens = DiscretePathEnsemble.from_ensemble(self, self.stateA, self.stateB)
        return ens.weighted_fundamental_sequences(tmatrix_for_classification, symmetric)

    def weighted_FS(self, tmatrix_for_classification=None, n_paths=1000, symmetric=True):
        """Weighted fundamental sequences from paths sampled off the model."""
        if tmatrix_for_classification is None:
            tmatrix_for_classification = self.markov_tmatrix
        if self.markovian:
            tmatrix_to_generate_paths = self.markov_tmatrix
        else:
            tmatrix_to_generate_paths = self.tmatrixAB()
        ens = DiscretePathEnsemble.from_transition_matrix(tmatrix_to_generate_paths, self.stateA, self.stateB, n_paths)
        return ens.weighted_fundamental_sequences(tmatrix_for_classification, symmetric)
class MarkovPlusColorModel(NonMarkovModel):
"""Define a class for analyzing MD trajectories using Markovian Plus Color Model"""
    def __init__(self, trajectories, stateA, stateB, lag_time=1, clean_traj=False, sliding_window=True, hist_length=0, **kwargs):
        """Initialize the Markov-plus-color model.

        :param hist_length: history length stored for use by fit()
            (presumably the color-history window -- confirm against fit()).
        """
        # hist_length must be bound before super().__init__, because the base
        # constructor ends by calling self.fit(), which is overridden here.
        self.hist_length = hist_length
        super().__init__(trajectories, stateA, stateB, lag_time, clean_traj, sliding_window, **kwargs)
def fit(self):
"""Fits the markov plus color model from a list of sequences"""
# Non-Markovian count matrix
nm_tmatrix = np.zeros((2 * self.n_states, 2 * self.n_states))
# Markovian transition matrix
markov_tmatrix = | np.zeros((self.n_states, self.n_states)) | numpy.zeros |
#PoseGraph Pose graph
import roboticstoolbox as rtb
import pgraph
from spatialmath import base, SE2
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import zipfile
import time
import math
class PoseGraph:
# properties
# graph
# ngrid
# center
# cellsize
    def __init__(self, filename, laser=False, verbose=False):
        """Load a pose graph from a g2o- or TORO-format file.

        :param filename: path to the graph file; a ``.zip`` archive holding a
            file of the same base name is also accepted
        :param laser: if True, attach ROBOTLASER1 scan data to graph vertices
        :param verbose: passed through to the underlying pgraph.UGraph
        """
        # parse the file data
        # we assume g2o format
        #    VERTEX* vertex_id X Y THETA
        #    EDGE* startvertex_id endvertex_id X Y THETA IXX IXY IYY IXT IYT ITT
        # vertex numbers start at 0
        self.laser = laser
        self.graph = pgraph.UGraph(verbose=verbose)
        path = rtb.path_to_datafile(filename)
        if filename.endswith('.zip'):
            zf = zipfile.ZipFile(path, 'r')
            opener = zf.open
            filename = filename[:-4]
        else:
            opener = open
            filename = path
        with opener(filename, 'r') as f:
            toroformat = False
            nlaser = 0
            # indices into ROBOTLASER1 record for the 3x3 info matrix in column major
            # order
            g2o = [0, 1, 2, 1, 3, 4, 2, 4, 5]
            toro = [0, 1, 4, 1, 2, 5, 4, 5, 3]
            # vindex maps the file's vertex ids to PGraph vertex objects
            vindex = {}
            firstlaser = True
            for line in f:
                # for zip file, we get data as bytes not str
                if isinstance(line, bytes):
                    line = line.decode()
                # is it a comment?
                if line.startswith('#'):
                    continue
                tokens = line.split(' ')
                # g2o format records
                if tokens[0] == 'VERTEX_SE2':
                    v = self.graph.add_vertex([float(x) for x in tokens[2:5]])
                    id = int(tokens[1])
                    vindex[id] = v
                    v.id = id
                    v.type = 'vertex'
                elif tokens[0] == 'VERTEX_XY':
                    v = self.graph.add_vertex([float(x) for x in tokens[2:4]])
                    id = int(tokens[1])
                    vindex[id] = v
                    v.id = id
                    v.type = 'landmark'
                elif tokens[0] == 'EDGE_SE2':
                    v1 = vindex[int(tokens[1])]
                    v2 = vindex[int(tokens[2])]
                    # create the edge
                    e = self.graph.add_edge(v1, v2)
                    # create the edge data as a structure
                    #  X  Y  T
                    #  3  4  5
                    e.mean = np.array([float(x) for x in tokens[3:6]])
                    # IXX IXY IXT IYY IYT ITT
                    #   6   7   8   9  10  11
                    info = np.array([float(x) for x in tokens[6:12]])
                    e.info = np.reshape(info[g2o], (3,3))
                ## TORO format records
                elif tokens[0] == 'VERTEX2':
                    toroformat = True
                    v = self.graph.add_vertex([float(x) for x in tokens[2:5]])
                    id = int(tokens[1])
                    vindex[id] = v
                    v.id = id
                    v.type = 'vertex'
                elif tokens[0] == 'EDGE2':
                    toroformat = True
                    v1 = vindex[int(tokens[1])]
                    v2 = vindex[int(tokens[2])]
                    # create the edge
                    e = self.graph.add_edge(v1, v2)
                    # create the edge data as a structure
                    #  X  Y  T
                    #  3  4  5
                    # NOTE(review): here mean is a plain list, but the
                    # EDGE_SE2 branch stores an np.array — confirm downstream
                    # consumers accept both.
                    e.mean = [float(x) for x in tokens[3:6]]
                    # IXX IXY IXT IYY IYT ITT
                    #   6   7   8   9  10  11
                    info = np.array([float(x) for x in tokens[6:12]])
                    e.info = np.reshape(info[toro], (3,3))
                elif tokens[0] == 'ROBOTLASER1':
                    if not laser:
                        continue
                    # laser records are associated with the immediately preceding VERTEX record
                    # not quite sure what all the fields are
                    # 1 ?
                    # 2 min scan angle
                    # 3 scan range
                    # 4 angular increment
                    # 5 maximum range possible
                    # 6 ?
                    # 7 ?
                    # 8 N = number of beams
                    # 9 to 9+N laser range data
                    # 9+N+1 ?
                    # 9+N+2 ?
                    # 9+N+3 ?
                    # 9+N+4 ?
                    # 9+N+5 ?
                    # 9+N+6 ?
                    # 9+N+7 ?
                    # 9+N+8 ?
                    # 9+N+9 ?
                    # 9+N+10 ?
                    # 9+N+11 ?
                    # 9+N+12 timestamp (*nix timestamp)
                    # 9+N+13 laser type (str)
                    # 9+N+14 ?
                    if firstlaser:
                        nbeams = int(tokens[8])
                        lasermeta = tokens[2:6]
                        firstlaser = False
                    # attach the scan to the most recently created vertex v
                    v.theta = np.arange(0, nbeams) * float(tokens[4]) + float(tokens[2])
                    v.range = np.array([float(x) for x in tokens[9:nbeams+9]])
                    v.time = float(tokens[21+nbeams])
                    nlaser += 1
                else:
                    # NOTE(review): "(unknown)" looks like a redacted filename
                    # placeholder — consider f"... in {filename}".
                    raise RuntimeError(f"Unexpected line {line} in (unknown)")
        if toroformat:
            print(f"loaded TORO/LAGO format file: {self.graph.n} nodes, {self.graph.ne} edges")
        else:
            print(f"loaded g2o format file: {self.graph.n} nodes, {self.graph.ne} edges")
        if nlaser > 0:
            # lasermeta holds [min angle, scan range, angular increment, max range]
            lasermeta = [float(x) for x in lasermeta]
            self._angmin = lasermeta[0]
            self._angmax = sum(lasermeta[0:2])
            self._maxrange = lasermeta[3]
            fov = np.degrees([self._angmin, self._angmax])
            print(f"  {nlaser} laser scans: {nbeams} beams, fov {fov[0]:.1f}° to {fov[1]:.1f}°, max range {self._maxrange}")
        self.vindex = vindex
def scan(self, i):
v = self.vindex[i]
return v.range, v.theta
def scanxy(self, i):
v = self.vindex[i]
range, theta = self.scan(i)
x = range * | np.cos(theta) | numpy.cos |
"""
This module contains our thermodynamic calculations. Calculation of pressure, fugacity coefficient, and max density are handled by an Eos object so that these functions can be used with any EOS. The thermodynamics module contains a series of wrapper to handle the inputs and outputs of these functions.
"""
import numpy as np
from scipy import interpolate
import scipy.optimize as spo
from scipy.ndimage.filters import gaussian_filter1d
import copy
import logging
import despasito.utils.general_toolbox as gtb
from despasito import fundamental_constants as constants
import despasito.utils.general_toolbox as gtb
logger = logging.getLogger(__name__)
def pressure_vs_volume_arrays(
    T,
    xi,
    Eos,
    min_density_fraction=(1.0 / 500000.0),
    density_increment=5.0,
    max_volume_increment=1.0e-4,
    pressure_min=100,
    maxiter=25,
    multfactor=2,
    extended_npts=20,
    max_density=None,
    density_max_opts={},
    **kwargs
):
    r"""
    Output arrays with specific volume and pressure arrays calculated from the given EOS.
    This function is fundamental to every calculation, the options of which are passed through higher level calculation with the keyword variable ``density_opts``.
    Parameters
    ----------
    T : float
        [K] Temperature of the system
    xi : numpy.ndarray
        Mole fraction of each component, sum(xi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    min_density_fraction : float, Optional, default=(1.0/500000.0)
        Fraction of the maximum density used to calculate, and is equal to, the minimum density of the density array. The minimum density is the reciprocal of the maximum specific volume used to calculate the roots.
    density_increment : float, Optional, default=5.0
        The increment between density values in the density array.
    max_volume_increment : float, Optional, default=1.0E-4
        Maximum increment between specific volume array values. After conversion from density to specific volume, the increment values are compared to this value.
    pressure_min : float, Optional, default=100
        Ensure pressure curve reaches down to this value
    multfactor : int, Optional, default=2
        Multiplication factor to extend range
    extended_npts : int, Optional, default=20
        Number of points in extended range
    maxiter : int, Optional, default=25
        Number of times to multiply range by to obtain full pressure vs. specific volume curve
    max_density : float, Optional, default=None
        [mol/:math:`m^3`] Maximum molar density defined, if default of None is used then the Eos object method, density_max is used.
    density_max_opts : dict, Optional, default={}
        Keyword arguments for density_max method for EOS object
    Returns
    -------
    vlist : numpy.ndarray
        [:math:`m^3`/mol] Specific volume array.
    Plist : numpy.ndarray
        [Pa] Pressure associated with specific volume of system with given temperature and composition
    """
    # NOTE(review): density_max_opts={} is a mutable default argument; it is
    # only read here, but a None sentinel would be safer.
    if len(kwargs) > 0:
        logger.debug(
            "    'pressure_vs_volume_arrays' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )
    if np.any(np.isnan(xi)):
        raise ValueError("Given mole fractions are NaN")
    if isinstance(xi, list):
        xi = np.array(xi)
    # estimate the maximum density based on the hard sphere packing fraction, part of EOS
    if not max_density:
        max_density = Eos.density_max(xi, T, **density_max_opts)
    elif gtb.isiterable(max_density):
        # NOTE(review): an iterable max_density is logged as an error but then
        # silently truncated to its first element rather than raising.
        logger.error(
            "    Maxrho should be type float. Given value: {}".format(max_density)
        )
        max_density = max_density[0]
    if max_density > 1e5:
        raise ValueError(
            "Max density of {} mol/m^3 is not feasible, check parameters.".format(
                max_density
            )
        )
    # min rho is a fraction of max rho, such that minrho << rhogassat
    minrho = max_density * min_density_fraction
    # list of densities for P,rho and P,v
    if (max_density - minrho) < density_increment:
        raise ValueError(
            "Density range, {}, is less than increment, {}. Check parameters used in Eos.density_max().".format(
                (max_density - minrho), density_increment
            )
        )
    rholist = np.arange(minrho, max_density, density_increment)
    # check rholist to see when the spacing
    # (specific-volume gaps between adjacent density points)
    vspace = (1.0 / rholist[:-1]) - (1.0 / rholist[1:])
    if np.amax(vspace) > max_volume_increment:
        # Low-density points are too sparse in specific volume; rebuild that
        # region on a uniform specific-volume grid instead.
        vspaceswitch = np.where(vspace > max_volume_increment)[0][-1]
        rholist_2 = (
            1.0
            / np.arange(
                1.0 / rholist[vspaceswitch + 1], 1.0 / minrho, max_volume_increment
            )[::-1]
        )
        rholist = np.append(rholist_2, rholist[vspaceswitch + 2 :])
    # compute Pressures (Plist) for rholist
    Plist = Eos.pressure(rholist, T, xi)
    # Make sure enough of the pressure curve is obtained: extend the
    # low-density end by halving the minimum density until the curve reaches
    # below pressure_min (or maxiter extensions have been tried).
    for i in range(maxiter):
        if Plist[0] > pressure_min:
            rhotmp = np.linspace(rholist[0] / 2, rholist[0], extended_npts)[:-1]
            Ptmp = Eos.pressure(rhotmp, T, xi)
            Plist = np.append(Ptmp, Plist)
            rholist = np.append(rhotmp, rholist)
        else:
            break
    # Flip Plist and rholist arrays so specific volume increases monotonically
    Plist = Plist[:][::-1]
    rholist = rholist[:][::-1]
    vlist = 1.0 / rholist
    return vlist, Plist
def pressure_vs_volume_spline(vlist, Plist):
    r"""
    Fit arrays of specific volume and pressure values to a cubic Univariate Spline.
    Parameters
    ----------
    vlist : numpy.ndarray
        [:math:`m^3`/mol] Specific volume array.
    Plist : numpy.ndarray
        [Pa] Pressure associated with specific volume of system with given temperature and composition
    Returns
    -------
    Pvspline : obj
        Function object of pressure vs. specific volume
    roots : list
        List of specific volume roots. Subtract a system pressure from the output of Pvsrho to find density of vapor and/or liquid densities.
    extrema : list
        List of specific volume values corresponding to local minima and maxima.
    """
    # Lightly smooth the pressure data before fitting the splines.
    smoothed = gaussian_filter1d(Plist, sigma=1.0e-2)
    # Roots come from a cubic fit (spline.roots() requires k=3) ...
    cubic_fit = interpolate.InterpolatedUnivariateSpline(vlist, smoothed)
    roots = cubic_fit.roots().tolist()
    # ... while extrema come from the derivative of a quartic fit.
    Pvspline = interpolate.InterpolatedUnivariateSpline(vlist, smoothed, k=4)
    extrema = Pvspline.derivative().roots().tolist()
    # Keep at most the two lowest-volume extrema.
    if len(extrema) > 2:
        extrema = extrema[0:2]
    if np.any(np.isnan(Plist)):
        roots = [np.nan]
    return Pvspline, roots, extrema
def pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=None, **kwargs):
    r"""
    Plot pressure vs. specific volume.
    Parameters
    ----------
    vlist : numpy.ndarray
        [:math:`m^3`/mol] Specific volume array.
    Plist : numpy.ndarray
        [Pa] Pressure associated with specific volume of system with given temperature and composition
    Pvspline : obj
        Function object of pressure vs. specific volume
    markers : list, Optional, default=None
        List of x positions to mark with vertical lines (None behaves like [])
    """
    # Avoid a shared mutable default argument; None is equivalent to the old [].
    if markers is None:
        markers = []
    if len(kwargs) > 0:
        logger.debug(
            "    'pressure_vs_volume_plot' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )
    try:
        import matplotlib.pyplot as plt

        plt.figure(1)
        plt.plot(vlist, Plist, label="Orig.")
        plt.plot(vlist, Pvspline(vlist), label="Smoothed")
        plt.plot([vlist[0], vlist[-1]], [0, 0], "k")
        for k in range(len(markers)):
            plt.plot([markers[k], markers[k]], [min(Plist), max(Plist)], "k")
        plt.xlabel("Specific Volume [$m^3$/mol]"), plt.ylabel("Pressure [Pa]")
        # plt.ylim(min(Plist)/2,np.abs(min(Plist))/2)
        plt.legend(loc="best")
        plt.tight_layout()
        plt.show()
    except ImportError:
        # Only a genuinely missing dependency should produce this message.
        logger.error("Matplotlib package is not installed, could not plot")
    except Exception:
        # Bug fix: previously any plotting failure was misreported as a
        # missing matplotlib package; keep the helper non-raising but log
        # the real error with its traceback.
        logger.exception("Plotting pressure vs. specific volume failed")
def calc_saturation_properties(
    T, xi, Eos, density_opts={}, tol=1e-6, Pconverged=1, **kwargs
):
    r"""
    Computes the saturated pressure, gas and liquid densities for a single component system.

    Parameters
    ----------
    T : float
        [K] Temperature of the system
    xi : numpy.ndarray
        Mole fraction of each component, sum(xi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
    tol : float, Optional, default=1e-6
        Tolerance to accept pressure value
    Pconverged : float, Optional, default=1.0
        If the pressure is negative (under tension), we search from a value just above vacuum

    Returns
    -------
    Psat : float
        [Pa] Saturation pressure given system information
    rhol : float
        [mol/:math:`m^3`] Density of liquid at saturation pressure
    rhov : float
        [mol/:math:`m^3`] Density of vapor at saturation pressure
    """
    if len(kwargs) > 0:
        logger.debug(
            " 'calc_saturation_properties' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )

    # Saturation properties are only defined for an effectively pure component:
    # reject compositions with more than one significant (> 10%) component.
    if np.count_nonzero(xi) != 1:
        if np.count_nonzero(xi > 0.1) != 1:
            raise ValueError(
                "Multiple components have compositions greater than 10%, check code for source"
            )
        else:
            # One dominant component plus traces: point the user at the dominant one
            ind = np.where((xi > 0.1) == True)[0]
            raise ValueError(
                "Multiple components have compositions greater than 0. Do you mean to obtain the saturation pressure of {} with a mole fraction of {}?".format(
                    Eos.beads[ind], xi[ind]
                )
            )

    # Isotherm P(v) plus its roots and local extrema
    vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
    Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)

    if not extrema or len(extrema) < 2 or np.any(np.isnan(roots)):
        # Without two extrema the isotherm has no van der Waals loop to construct from
        logger.warning(" The component is above its critical point")
        Psat, rhol, rhov = np.nan, np.nan, np.nan
    else:
        # Bracket the Psat search between the loop's local minimum and maximum
        ind_Pmin1 = np.argwhere(np.diff(Plist) > 0)[0][0]
        ind_Pmax1 = np.argmax(Plist[ind_Pmin1:]) + ind_Pmin1

        Pmaxsearch = Plist[ind_Pmax1]

        # Keep the lower search bound positive even if the loop dips under tension
        Pminsearch = max(Pconverged, np.amin(Plist[ind_Pmin1:ind_Pmax1]))

        # Using computed Psat find the roots in the maxwell construction to give liquid (first root) and vapor (last root) densities
        Psat = spo.minimize_scalar(
            objective_saturation_pressure,
            args=(Plist, vlist),
            bounds=(Pminsearch, Pmaxsearch),
            method="bounded",
        )
        Psat = Psat.x
        obj_value = objective_saturation_pressure(Psat, Plist, vlist)

        # Re-evaluate roots/extrema on the curve shifted by the candidate Psat
        Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist - Psat)
        # pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)

        if obj_value < tol:
            logger.debug(
                " Psat found: {} Pa, obj value: {}, with {} roots and {} extrema".format(
                    Psat, obj_value, np.size(roots), np.size(extrema)
                )
            )

            if len(roots) == 2:
                # Vapor root beyond the sampled range: linearly extrapolate the
                # isotherm tail to estimate it, then refine with the EOS
                slope, yroot = np.polyfit(vlist[-4:], Plist[-4:] - Psat, 1)
                vroot = -yroot / slope
                if vroot < 0.0:
                    vroot = np.finfo(float).eps
                rho_tmp = spo.minimize(
                    pressure_spline_error,
                    1.0 / vroot,
                    args=(Psat, T, xi, Eos),
                    bounds=[(1.0 / (vroot * 1e2), 1.0 / (1.1 * roots[-1]))],
                )
                roots = np.append(roots, [1.0 / rho_tmp.x])

            # Smallest-volume root is the liquid, largest-volume root the vapor
            rhol = 1.0 / roots[0]
            rhov = 1.0 / roots[2]

        else:
            logger.warning(
                " Psat NOT found: {} Pa, obj value: {}, consider decreasing 'pressure_min' option in density_opts".format(
                    Psat, obj_value
                )
            )
            Psat, rhol, rhov = np.nan, np.nan, np.nan

        # Log fugacity coefficients at the computed Psat as a consistency check;
        # at equilibrium phiv and phil should agree
        tmpv, _, _ = calc_vapor_fugacity_coefficient(
            Psat, T, xi, Eos, density_opts=density_opts
        )
        tmpl, _, _ = calc_liquid_fugacity_coefficient(
            Psat, T, xi, Eos, density_opts=density_opts
        )
        logger.debug(" phiv: {}, phil: {}".format(tmpv, tmpl))

    return Psat, rhol, rhov
def objective_saturation_pressure(shift, Pv, vlist):
    r"""
    Objective function used to calculate the saturation pressure.

    The pressure curve is translated down by ``shift`` and the two areas it
    encloses with the zero line (Maxwell equal-area construction) are summed
    and squared; the minimum of this quantity locates the saturation pressure.

    Parameters
    ----------
    shift : float
        [Pa] Guess in Psat value used to translate the pressure vs. specific volume curve
    Pv : numpy.ndarray
        [Pa] Pressure associated with specific volume of system with given temperature and composition
    vlist : numpy.ndarray
        [mol/:math:`m^3`] Specific volume array. Length depends on values in density_opts passed to :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`

    Returns
    -------
    obj_value : float
        Output of objective function, the addition of the positive area between first two roots, and negative area between second and third roots, quantity squared.
    """
    Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Pv - shift)

    n_roots = len(roots)
    if n_roots >= 3:
        area_left = Pvspline.integral(roots[0], roots[1])
        area_right = Pvspline.integral(roots[1], roots[2])
    elif n_roots == 2:
        area_left = Pvspline.integral(roots[0], roots[1])
        # The shifted curve has not decayed back to zero within vlist, so the
        # third root is missing. Approximate the remaining area as a triangle
        # from a linear extrapolation of the last few points — crude, but only
        # used to bootstrap the saturation pressure search.
        slope, yroot = np.polyfit(vlist[-4:], Pv[-4:] - shift, 1)
        area_right = Pvspline.integral(roots[1], vlist[-1]) + (Pv[-1] - shift) * (
            -yroot / slope - vlist[-1]
        ) / 2
        # raise ValueError("Pressure curve only has two roots. If the curve hasn't fully decayed, either increase maximum specific volume or decrease 'pressure_min' in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`.")
    elif np.any(np.isnan(roots)):
        raise ValueError(
            "Pressure curve without cubic properties has wrongly been accepted. Try decreasing pressure."
        )
    else:
        raise ValueError(
            "Pressure curve without cubic properties has wrongly been accepted. Try decreasing min_density_fraction"
        )
    # pressure_vs_volume_plot(vlist, Pv-shift, Pvspline, markers=extrema)

    return (area_left + area_right) ** 2
def calc_vapor_density(P, T, xi, Eos, density_opts={}, **kwargs):
    r"""
    Computes vapor density under system conditions.

    Parameters
    ----------
    P : float
        [Pa] Pressure of the system
    T : float
        [K] Temperature of the system
    xi : numpy.ndarray
        Mole fraction of each component, sum(xi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`

    Returns
    -------
    rhov : float
        [mol/:math:`m^3`] Density of vapor at system pressure
    flag : int
        A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means we should assume ideal gas
    """
    if len(kwargs) > 0:
        logger.debug(
            " 'calc_vapor_density' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )

    # Shift the isotherm by -P so that spline roots are the specific volumes
    # where the EOS pressure equals the target pressure.
    vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
    Plist = Plist - P
    Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)

    logger.debug(" Find rhov: P {} Pa, roots {} m^3/mol".format(P, roots))

    # Set when a fallback optimization has already failed, so the final
    # refinement step below is skipped.
    flag_NoOpt = False
    l_roots = len(roots)
    if np.any(np.isnan(roots)):
        # NaN roots signal a non-physical isotherm: no fluid at these conditions
        rho_tmp = np.nan
        flag = 3
        logger.warning(
            " Flag 3: The T and yi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
                T, xi
            )
        )
    elif l_roots == 0:
        # No crossing of the target pressure inside the sampled volume range.
        # NOTE(review): Pvspline maps specific volume -> pressure, yet it is
        # evaluated at a density (1 / v) here — confirm this is intended.
        if Pvspline(1 / vlist[-1]) < 0:
            try:
                # Search the full density range for a liquid-like solution
                rho_tmp = spo.least_squares(
                    pressure_spline_error,
                    1 / vlist[0],
                    args=(P, T, xi, Eos),
                    bounds=(
                        np.finfo("float").eps,
                        Eos.density_max(xi, T, maxpack=0.99),
                    ),
                )
                rho_tmp = rho_tmp.x
                if not len(extrema):
                    flag = 2
                    logger.debug(
                        " Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
                            T, xi
                        )
                    )
                else:
                    flag = 1
                    logger.debug(
                        " Flag 1: The T and yi, {} {}, combination produces a liquid at this pressure".format(
                            T, xi
                        )
                    )
            except Exception:
                rho_tmp = np.nan
                flag = 3
                logger.warning(
                    " Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure, without density greater than max, {}".format(
                        T, xi, Eos.density_max(xi, T, maxpack=0.99)
                    )
                )
                flag_NoOpt = True
        elif min(Plist) + P > 0:
            # Entire curve above zero: extrapolate the tail linearly to guess
            # where the vapor root would fall beyond the sampled range.
            slope, yroot = np.polyfit(vlist[-4:], Plist[-4:], 1)
            vroot = -yroot / slope
            try:
                # NOTE(review): roots is empty in this branch (l_roots == 0), so
                # roots[-1] raises IndexError and the except path (flag = 4,
                # ideal gas) is always taken — confirm this is intended.
                rho_tmp = spo.least_squares(
                    pressure_spline_error,
                    1 / vroot,
                    args=(P, T, xi, Eos),
                    bounds=(np.finfo("float").eps, 1.0 / (1.1 * roots[-1])),
                )
                rho_tmp = rho_tmp.x
                flag = 0
            except Exception:
                rho_tmp = np.nan
                flag = 4

            if not len(extrema):
                logger.debug(
                    " Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
                        T, xi
                    )
                )
            else:
                logger.debug(
                    " Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
                        T, xi
                    )
                )
        else:
            logger.warning(
                " Flag 3: The T and yi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
                    T, xi
                )
            )
            flag = 3
            rho_tmp = np.nan
    elif l_roots == 1:
        if not len(extrema):
            # Single root and monotonic curve: critical fluid
            flag = 2
            rho_tmp = 1.0 / roots[0]
            logger.debug(
                " Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
                    T, xi
                )
            )
        elif (Pvspline(roots[0]) + P) > (Pvspline(max(extrema)) + P):
            # Root lies on the high-pressure (small volume) side of the loop: liquid
            flag = 1
            rho_tmp = 1.0 / roots[0]
            logger.debug(
                " Flag 1: The T and yi, {} {}, combination produces a liquid at this pressure".format(
                    T, xi
                )
            )
        elif len(extrema) > 1:
            flag = 0
            rho_tmp = 1.0 / roots[0]
            logger.debug(
                " Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
                    T, xi
                )
            )
        # NOTE(review): with exactly one extremum and neither condition above
        # holding, flag/rho_tmp stay unassigned and the logging below would
        # raise NameError — confirm this case cannot occur.
    elif l_roots == 2:
        if (Pvspline(roots[0]) + P) < 0.0:
            # Both roots in the tension region: stretched (metastable) liquid
            flag = 1
            rho_tmp = 1.0 / roots[0]
            logger.debug(
                " Flag 1: This T and yi, {} {}, combination produces a liquid under tension at this pressure".format(
                    T, xi
                )
            )
        else:
            # Vapor root lies beyond the sampled range: extrapolate the tail
            slope, yroot = np.polyfit(vlist[-4:], Plist[-4:], 1)
            vroot = -yroot / slope
            try:
                rho_tmp = spo.least_squares(
                    pressure_spline_error,
                    1 / vroot,
                    args=(P, T, xi, Eos),
                    bounds=(np.finfo("float").eps, 1.0 / (1.1 * roots[-1])),
                )
                rho_tmp = rho_tmp.x
                flag = 0
            except Exception:
                rho_tmp = np.nan
                flag = 4

            if not len(extrema):
                logger.debug(
                    " Flag 2: The T and yi, {} {}, combination produces a critical fluid at this pressure".format(
                        T, xi
                    )
                )
            else:
                logger.debug(
                    " Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
                        T, xi
                    )
                )
    else:  # 3 roots
        logger.debug(
            " Flag 0: This T and yi, {} {}, combination produces a vapor at this pressure.".format(
                T, xi
            )
        )
        # Largest-volume (last) root corresponds to the vapor density
        rho_tmp = 1.0 / roots[2]
        flag = 0

    if flag in [0, 2]:  # vapor or critical fluid
        # Refine the estimate: bracket the root within +/- 1% in density,
        # capping the upper bracket at the EOS maximum packing density.
        tmp = [rho_tmp * 0.99, rho_tmp * 1.01]
        if rho_tmp * 1.01 > Eos.density_max(xi, T, maxpack=0.99):
            tmp[1] = Eos.density_max(xi, T, maxpack=0.99)

        if (
            pressure_spline_error(tmp[0], P, T, xi, Eos)
            * pressure_spline_error(tmp[1], P, T, xi, Eos)
        ) < 0:
            # Sign change across the bracket: polish the root with brentq
            rho_tmp = spo.brentq(
                pressure_spline_error,
                tmp[0],
                tmp[1],
                args=(P, T, xi, Eos),
                rtol=0.0000001,
            )
        else:
            if Plist[0] < 0:
                logger.warning(
                    " Density value could not be bounded with (rhomin,rhomax), {}. Using approximate density value".format(
                        tmp
                    )
                )
            elif not flag_NoOpt:
                # No bracket available: fall back to bounded least squares
                rho_tmp = spo.least_squares(
                    pressure_spline_error,
                    rho_tmp,
                    args=(P, T, xi, Eos),
                    bounds=(
                        np.finfo("float").eps,
                        Eos.density_max(xi, T, maxpack=0.99),
                    ),
                )
                rho_tmp = rho_tmp.x

    logger.debug(" Vapor Density: {} mol/m^3, flag {}".format(rho_tmp, flag))

    # pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)

    # Flag: 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means we should assume ideal gas
    return rho_tmp, flag
def calc_liquid_density(P, T, xi, Eos, density_opts={}, **kwargs):
    r"""
    Computes liquid density under system conditions.

    Parameters
    ----------
    P : float
        [Pa] Pressure of the system
    T : float
        [K] Temperature of the system
    xi : numpy.ndarray
        Mole fraction of each component, sum(xi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`

    Returns
    -------
    rhol : float
        [mol/:math:`m^3`] Density of liquid at system pressure
    flag : int
        A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true
    """
    if len(kwargs) > 0:
        logger.debug(
            " 'calc_liquid_density' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )

    # Get roots and local minima and maxima
    # The isotherm is shifted by -P so spline roots are candidate volumes.
    vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
    Plist = Plist - P
    Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)

    logger.debug(" Find rhol: P {} Pa, roots {} m^3/mol".format(P, str(roots)))
    # Set when a fallback optimization has already failed, so the final
    # refinement step below is skipped.
    flag_NoOpt = False

    if extrema:
        if len(extrema) == 1:
            logger.warning(
                " One extrema at {}, assume weird minima behavior. Check your parameters.".format(
                    1 / extrema[0]
                )
            )

    # Assess roots, what is the liquid density
    l_roots = len(roots)
    if np.any(np.isnan(roots)):
        # NaN roots signal a non-physical isotherm: no fluid at these conditions
        rho_tmp = np.nan
        flag = 3
        logger.warning(
            " Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
                T, xi
            )
        )
    elif l_roots == 0:
        # No crossing inside the sampled volume range.
        # NOTE(review): this is a bare truthiness test, while calc_vapor_density
        # uses `< 0` for the analogous check — confirm the missing comparison
        # is intended.
        if Pvspline(1 / vlist[-1]):
            try:
                # Start from the midpoint of the full liquid-like density range
                bounds = (1 / vlist[0], Eos.density_max(xi, T, maxpack=0.99))
                rho_tmp = spo.least_squares(
                    pressure_spline_error,
                    np.mean(bounds),
                    args=(P, T, xi, Eos),
                    bounds=bounds,
                )
                rho_tmp = rho_tmp.x
                if not len(extrema):
                    flag = 2
                    logger.debug(
                        " Flag 2: The T and xi, {} {}, combination produces a critical fluid at this pressure".format(
                            T, xi
                        )
                    )
                else:
                    flag = 1
                    logger.debug(
                        " Flag 1: The T and xi, {} {}, combination produces a liquid at this pressure".format(
                            T, xi
                        )
                    )
            except Exception:
                rho_tmp = np.nan
                flag = 3
                logger.warning(
                    " Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure, without density greater than max, {}".format(
                        T, xi, Eos.density_max(xi, T, maxpack=0.99)
                    )
                )
                flag_NoOpt = True
        elif min(Plist) + P > 0:
            # Entire curve above zero: extrapolate the tail linearly to guess
            # where a vapor-like root would fall.
            slope, yroot = np.polyfit(vlist[-4:], Plist[-4:], 1)
            vroot = -yroot / slope
            try:
                # NOTE(review): roots is empty in this branch (l_roots == 0), so
                # roots[-1] raises IndexError and the except path (flag = 4) is
                # always taken — confirm this is intended.
                rho_tmp = spo.least_squares(
                    pressure_spline_error,
                    1.0 / vroot,
                    args=(P, T, xi, Eos),
                    bounds=(np.finfo("float").eps, 1.0 / (1.1 * roots[-1])),
                )
                rho_tmp = rho_tmp.x
                flag = 0
            except Exception:
                rho_tmp = np.nan
                flag = 4

            if not len(extrema):
                logger.debug(
                    " Flag 2: The T and xi, {} {}, combination produces a critical fluid at this pressure".format(
                        T, xi
                    )
                )
            else:
                logger.debug(
                    " Flag 0: This T and xi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
                        T, xi
                    )
                )
        else:
            flag = 3
            logger.error(
                " Flag 3: The T and xi, {} {}, won't produce a fluid (vapor or liquid) at this pressure".format(
                    str(T), str(xi)
                )
            )
            rho_tmp = np.nan
            # pressure_vs_volume_plot(vlist, Plist, Pvspline, markers=extrema)
    elif l_roots == 2:  # 2 roots
        if (Pvspline(roots[0]) + P) < 0.0:
            # First root in the tension region: stretched (metastable) liquid
            flag = 1
            rho_tmp = 1.0 / roots[0]
            logger.debug(
                " Flag 1: This T and xi, {} {}, combination produces a liquid under tension at this pressure".format(
                    T, xi
                )
            )
        else:  # There should be three roots, but the values of specific volume don't go far enough to pick up the last one
            flag = 1
            rho_tmp = 1.0 / roots[0]
    elif l_roots == 1:  # 1 root
        if not len(extrema):
            # Single root and monotonic curve: critical fluid
            flag = 2
            rho_tmp = 1.0 / roots[0]
            logger.debug(
                " Flag 2: The T and xi, {} {}, combination produces a critical fluid at this pressure".format(
                    T, xi
                )
            )
        elif (Pvspline(roots[0]) + P) > (Pvspline(max(extrema)) + P):
            # Root lies on the high-pressure (small volume) side of the loop: liquid
            flag = 1
            rho_tmp = 1.0 / roots[0]
            logger.debug(
                " Flag 1: The T and xi, {} {}, combination produces a liquid at this pressure".format(
                    T, xi
                )
            )
        elif len(extrema) > 1:
            flag = 0
            rho_tmp = 1.0 / roots[0]
            logger.debug(
                " Flag 0: This T and xi, {} {}, combination produces a vapor at this pressure. Warning! approaching critical fluid".format(
                    T, xi
                )
            )
    else:  # 3 roots
        # Smallest-volume (first) root corresponds to the liquid density
        rho_tmp = 1.0 / roots[0]
        flag = 1
        logger.debug(
            " Flag 1: The T and xi, {} {}, combination produces a liquid at this pressure".format(
                T, xi
            )
        )

    if flag in [1, 2]:  # liquid or critical fluid
        # Refine the estimate by bracketing the root within +/- 1% in density
        tmp = [rho_tmp * 0.99, rho_tmp * 1.01]
        P_tmp = [
            pressure_spline_error(tmp[0], P, T, xi, Eos),
            pressure_spline_error(tmp[1], P, T, xi, Eos),
        ]
        if (P_tmp[0] * P_tmp[1]) < 0:
            # Sign change across the bracket: polish the root with brentq
            rho_tmp = spo.brentq(
                pressure_spline_error, tmp[0], tmp[1], args=(P, T, xi, Eos), rtol=1e-7
            )
        else:
            if P_tmp[0] < 0:
                logger.warning(
                    " Density value could not be bounded with (rhomin,rhomax), {}. Using approximate density value".format(
                        tmp
                    )
                )
            elif not flag_NoOpt:
                # No bracket available: fall back to bounded least squares.
                # NOTE(review): this returns rho_tmp.x[0] (a scalar) while
                # calc_vapor_density returns rho_tmp.x (an array) in the same
                # situation — confirm the inconsistency is intended.
                rho_tmp = spo.least_squares(
                    pressure_spline_error,
                    rho_tmp,
                    args=(P, T, xi, Eos),
                    bounds=(
                        np.finfo("float").eps,
                        Eos.density_max(xi, T, maxpack=0.99),
                    ),
                )
                rho_tmp = rho_tmp.x[0]

    logger.debug(" Liquid Density: {} mol/m^3, flag {}".format(rho_tmp, flag))

    # Flag: 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true
    return rho_tmp, flag
def pressure_spline_error(rho, Pset, T, xi, Eos):
    """
    Return the difference between the EOS pressure at a given density and a set-point pressure.

    Used to ensure an accurate value from the EOS rather than an estimate from a spline.

    Parameters
    ----------
    rho : float
        [mol/:math:`m^3`] Density of system
    Pset : float
        [Pa] Guess in pressure of the system
    T : float
        [K] Temperature of the system
    xi : numpy.ndarray
        Mole fraction of each component, sum(xi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.

    Returns
    -------
    pressure_spline_error : float
        [Pa] Difference in set pressure and predicted pressure given system conditions.
    """
    # Residual is zero when the EOS reproduces the set-point pressure exactly
    return Eos.pressure(rho, T, xi) - Pset
def calc_vapor_fugacity_coefficient(P, T, yi, Eos, density_opts={}, **kwargs):
    r"""
    Computes vapor fugacity coefficient under system conditions.

    Parameters
    ----------
    P : float
        [Pa] Pressure of the system
    T : float
        [K] Temperature of the system
    yi : numpy.ndarray
        Mole fraction of each component, sum(yi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`

    Returns
    -------
    phiv : numpy.ndarray
        Fugacity coefficient of each component in the vapor at system pressure
    rhov : float
        [mol/:math:`m^3`] Density of vapor at system pressure
    flag : int
        Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed
    """
    if len(kwargs) > 0:
        logger.debug(
            " 'calc_vapor_fugacity_coefficient' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )

    rhov, flagv = calc_vapor_density(P, T, yi, Eos, density_opts)
    if flagv == 4:
        # Ideal gas assumed: fugacity coefficients are unity
        phiv = np.ones_like(yi)
        rhov = 0.0
        logger.info(" rhov set to 0.")
    elif flagv == 3:
        # No fluid root found: return NaN for every component. This was
        # previously hard-coded to two components (np.array([np.nan, np.nan])),
        # which produced a wrong-sized array for other mixture sizes.
        phiv = np.full(len(yi), np.nan)
    else:
        phiv = Eos.fugacity_coefficient(P, rhov, yi, T)

    return phiv, rhov, flagv
def calc_liquid_fugacity_coefficient(P, T, xi, Eos, density_opts={}, **kwargs):
    r"""
    Computes liquid fugacity coefficient under system conditions.

    Parameters
    ----------
    P : float
        [Pa] Pressure of the system
    T : float
        [K] Temperature of the system
    xi : numpy.ndarray
        Mole fraction of each component, sum(xi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`

    Returns
    -------
    phil : numpy.ndarray
        Fugacity coefficient of each component in the liquid at system pressure
    rhol : float
        [mol/:math:`m^3`] Density of liquid at system pressure
    flag : int
        Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true.
    """
    if len(kwargs) > 0:
        logger.debug(
            " 'calc_liquid_fugacity_coefficient' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )

    rhol, flagl = calc_liquid_density(P, T, xi, Eos, density_opts)
    if flagl == 3:
        # No fluid root found: return NaN for every component. This was
        # previously hard-coded to two components (np.array([np.nan, np.nan])),
        # which produced a wrong-sized array for other mixture sizes.
        phil = np.full(len(xi), np.nan)
    else:
        phil = Eos.fugacity_coefficient(P, rhol, xi, T)

    return phil, rhol, flagl
def calc_new_mole_fractions(phase_1_mole_fraction, phil, phiv, phase=None):
    r"""
    Calculate the alternative phase composition given the composition and fugacity coefficients of one phase, and the fugacity coefficients of the target phase.

    Parameters
    ----------
    phase_1_mole_fraction : numpy.ndarray
        Mole fraction of each component, sum(mole fraction) must equal 1.0
    phil : numpy.ndarray
        Fugacity coefficient of each component in the liquid at system pressure
    phiv : numpy.ndarray
        Fugacity coefficient of each component in the vapor at system pressure
    phase : str, default=None
        Use either 'vapor' or 'liquid' to define the mole fraction **being computed**. Default is None and it will fail to ensure the user specifies the correct phase

    Returns
    -------
    phase_2_mole_fraction : numpy.ndarray
        Mole fraction of each component computed from fugacity coefficients, sum(xi) should equal 1.0 when the solution is found, but the resulting values may not during an equilibrium calculation (e.g. bubble point).

    Raises
    ------
    ValueError
        If ``phase`` is not 'vapor' or 'liquid', the mole fractions do not sum
        to one, or any fugacity coefficient is NaN.
    """
    # `not in` already rejects None, so the former redundant `phase == None`
    # test is unnecessary.
    if phase not in ("vapor", "liquid"):
        raise ValueError(
            "The user must specify the desired mole fraction as either 'vapor' or 'liquid'."
        )
    # Use a tolerance instead of exact float equality so compositions such as
    # [1/3, 1/3, 1/3] are not rejected because of round-off.
    if not np.isclose(np.sum(phase_1_mole_fraction), 1.0):
        raise ValueError("Given mole fractions must add up to one.")
    if np.any(np.isnan(phiv)):
        raise ValueError("Vapor fugacity coefficients should not be NaN")
    if np.any(np.isnan(phil)):
        raise ValueError("Liquid fugacity coefficients should not be NaN")

    x1 = np.asarray(phase_1_mole_fraction, dtype=float)
    phil = np.asarray(phil, dtype=float)
    phiv = np.asarray(phiv, dtype=float)

    # Only transfer nonzero components; zero entries stay exactly zero and the
    # fugacity-coefficient ratio is never evaluated for them.
    phase_2_mole_fraction = np.zeros(len(x1))
    nonzero = x1 != 0.0
    if phase == "vapor":
        phase_2_mole_fraction[nonzero] = x1[nonzero] * phil[nonzero] / phiv[nonzero]
    else:
        phase_2_mole_fraction[nonzero] = x1[nonzero] * phiv[nonzero] / phil[nonzero]

    return phase_2_mole_fraction
def equilibrium_objective(phase_1_mole_fraction, phil, phiv, phase=None):
    r"""
    Computes the objective value used to determine equilibrium between phases. sum(phase_1_mole_fraction * phase_1_phi / phase_2_phi ) - 1.0, where `phase` is phase 2.

    Parameters
    ----------
    phase_1_mole_fraction : numpy.ndarray
        Mole fraction of each component, sum(mole fraction) must equal 1.0
    phil : numpy.ndarray
        Fugacity coefficient of each component in the liquid at system pressure
    phiv : numpy.ndarray
        Fugacity coefficient of each component in the vapor at system pressure
    phase : str, default=None
        Use either 'vapor' or 'liquid' to define the mole fraction **being computed**. Default is None and it will fail to ensure the user specifies the correct phase

    Returns
    -------
    objective_value : float
        Objective value indicating how close to equilibrium we are; zero at equilibrium.

    Raises
    ------
    ValueError
        If ``phase`` is not 'vapor' or 'liquid', the mole fractions do not sum
        to one, or any fugacity coefficient is NaN.
    """
    # `not in` already rejects None, so the former redundant `phase == None`
    # test is unnecessary.
    if phase not in ("vapor", "liquid"):
        raise ValueError(
            "The user must specify the desired mole fraction as either 'vapor' or 'liquid'."
        )
    # Use a tolerance instead of exact float equality so compositions such as
    # [1/3, 1/3, 1/3] are not rejected because of round-off.
    if not np.isclose(np.sum(phase_1_mole_fraction), 1.0):
        raise ValueError("Given mole fractions must add up to one.")
    if np.any(np.isnan(phiv)):
        raise ValueError("Vapor fugacity coefficients should not be NaN")
    if np.any(np.isnan(phil)):
        raise ValueError("Liquid fugacity coefficients should not be NaN")

    if phase == "vapor":
        objective_value = float((np.nansum(phase_1_mole_fraction * phil / phiv)) - 1.0)
    else:
        objective_value = float((np.nansum(phase_1_mole_fraction * phiv / phil)) - 1.0)

    return objective_value
def _clean_plot_data(x_old, y_old):
r"""
Reorder array and remove duplicates, then repeat process for the corresponding array.
Parameters
----------
x_old : numpy.ndarray
Original independent variable
y_old : numpy.ndarray
Original dependent variable
Returns
-------
x_new : numpy.ndarray
New independent variable
y_new : numpy.ndarray
New dependent variable
"""
x_new = np.sort(np.array(list(set(x_old))))
y_new = np.array([y_old[np.where(np.array(x_old) == x)[0][0]] for x in x_new])
return x_new, y_new
def calc_Prange_xi(
T,
xi,
yi,
Eos,
density_opts={},
Pmin=None,
Pmax=None,
maxiter=200,
mole_fraction_options={},
ptol=1e-2,
xytol=0.01,
maxfactor=2,
minfactor=0.5,
Pmin_allowed=100,
**kwargs
):
r"""
Obtain minimum and maximum pressure values for bubble point calculation.
The liquid mole fraction is set and the objective function at each of those values is of opposite sign.
Parameters
----------
T : float
Temperature of the system [K]
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
yi : numpy.ndarray
Vapor mole fraction of each component, sum(xi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
maxiter : float, Optional, default=200
Maximum number of iterations in both the loop to find Pmin and the loop to find Pmax
Pmin : float, Optional, default=1000.0
[Pa] Minimum pressure in pressure range that restricts searched space.
Pmax : float, Optional, default=100000
If no local minima or maxima are identified for the liquid composition at this temperature, this value is used as an initial estimate of the maximum pressure range.
Pmin_allowed : float, Optional, default=100
Minimum allowed pressure in search, before looking for a super critical fluid
mole_fraction_options : dict, Optional, default={}
Options used to solve the inner loop in the solving algorithm
ptol : float, Optional, default=1e-2
If two iterations in the search for the maximum pressure are within this tolerance, the search is discontinued
xytol : float, Optional, default=0.01
If the sum of absolute relative difference between the vapor and liquid mole fractions are less than this total, the pressure is assumed to be super critical and the maximum pressure is sought at a lower value.
maxfactor : float, Optional, default=2
Factor to multiply by the pressure if it is too low (produces liquid or positive objective value). Not used if an unfeasible maximum pressure is found to bound the problem (critical for NaN result).
minfactor : float, Optional, default=0.5
Factor to multiply by the minimum pressure if it is too high (produces critical value).
Returns
-------
Prange : list
List of min and max pressure range
Pguess : float
An interpolated guess in the equilibrium pressure from Prange
"""
if len(kwargs) > 0:
logger.debug(
"'calc_Prange_xi' does not use the following keyword arguments: {}".format(
", ".join(list(kwargs.keys()))
)
)
global _yi_global
# Guess a range from Pmin to the local max of the liquid curve
vlist, Plist = pressure_vs_volume_arrays(T, xi, Eos, **density_opts)
Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)
flag_hard_min = False
if Pmin != None:
flag_hard_min = True
if gtb.isiterable(Pmin):
Pmin = Pmin[0]
elif len(extrema):
Pmin = min(Pvspline(extrema))
if Pmin < 0:
Pmin = 1e3
else:
Pmin = 1e3
flag_hard_max = False
if Pmax != None:
flag_hard_max = True
if gtb.isiterable(Pmax):
Pmax = Pmax[0]
elif len(extrema):
Pmax = max(Pvspline(extrema))
else:
Pmax = 1e5
if Pmax < Pmin:
Pmax = Pmin * maxfactor
Prange = np.array([Pmin, Pmax])
#################### Find Minimum Pressure and Objective Function Value ###############
# Root of min from liquid curve is absolute minimum
ObjRange = np.zeros(2)
yi_range = yi
flag_max = False
flag_min = False
flag_critical = False
flag_liquid = False
flag_vapor = False
p = Prange[0]
for z in range(maxiter):
# Liquid properties
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
p, T, xi, Eos, density_opts=density_opts
)
if any(np.isnan(phil)):
logger.error("Estimated minimum pressure is too high.")
flag_max = True
flag_liquid = True
ObjRange[1] = np.inf
Prange[1] = p
if flag_hard_min:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = minfactor * p
if p < Prange[0]:
Prange[0] = p
ObjRange[0] = np.nan
continue
if flagl in [1, 2]: # 'liquid' phase is as expected
# Calculate vapor phase properties and obj value
yi_range, phiv_min, flagv_min = calc_vapor_composition(
yi_range,
xi,
phil,
p,
T,
Eos,
density_opts=density_opts,
**mole_fraction_options
)
obj = equilibrium_objective(xi, phil, phiv_min, phase="vapor")
if np.any(np.isnan(yi_range)):
logger.info("Estimated minimum pressure produces NaN")
flag_max = True
flag_liquid = True
Prange[1] = p
ObjRange[1] = obj
phiv_max, flagv_max = phiv_min, flagv_min
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
# If within tolerance of liquid mole fraction
elif np.sum(np.abs(xi - yi_range) / xi) < xytol and flagv_min == 2:
logger.info(
"Estimated minimum pressure reproduces xi: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
if (
flag_max or flag_hard_max
) and flag_liquid: # If a liquid phase exists at a higher pressure, this must bound the lower pressure
flag_min = True
ObjRange[0] = obj
Prange[0] = p
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
if np.abs(Prange[1] - Prange[0]) < ptol:
flag_critical = True
flag_max = False
ObjRange = [np.inf, np.inf]
Prange = [Pmin, Pmax]
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
p = maxfactor * Pmin
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
elif (
flag_min or flag_hard_min
) and flag_vapor: # If the 'liquid' phase is vapor at a lower pressure, this must bound the upper pressure
flag_max = True
ObjRange[1] = obj
Prange[1] = p
phiv_max, flagv_max = phiv_min, flagv_min
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
elif (
flag_critical
): # Couldn't find phase by lowering pressure, now raise it
ObjRange[0] = obj
Prange[0] = p
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
p = maxfactor * p
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
else:
flag_max = True
ObjRange[1] = obj
Prange[1] = p
phiv_max, flagv_max = phiv_min, flagv_min
if flag_hard_min:
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
p = minfactor * p
if p < Prange[0]:
Prange[0] = p
ObjRange[0] = np.nan
if p < Pmin_allowed: # Less than a kPa and can't find phase, go up
flag_critical = True
flag_max = False
ObjRange = [np.inf, np.inf]
Prange = [Pmin, Pmax]
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
p = maxfactor * Pmin
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
# If 'vapor' phase is liquid or unattainable
elif flagv_min not in [0, 2, 4]:
logger.info(
"Estimated minimum pressure produces liquid: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
if flag_hard_min and p <= Pmin:
flag_critical = True
if flag_max:
flag_max = False
flag_liquid = True
if flag_critical: # Looking for a super critical fluid
Prange[0] = p
ObjRange[0] = obj
flag_min = True
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * maxfactor
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
else: # Looking for a vapor
Prange[1] = p
ObjRange[1] = obj
flag_max = True
phiv_max, flagv_max = phiv_min, flagv_min
if flag_min or flag_hard_min:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * minfactor
if p < Prange[0]:
Prange[0] = p
ObjRange[0] = np.nan
# Found minimum pressure!
elif obj > 0:
logger.info(
"Found estimated minimum pressure: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
Prange[0] = p
ObjRange[0] = obj
break
elif obj < 0:
logger.info(
"Estimated minimum pressure too high: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
flag_liquid = True
flag_max = True
ObjRange[1] = obj
Prange[1] = p
phiv_max, flagv_max = phiv_min, flagv_min
if flag_min or flag_hard_min:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * minfactor
if p < Prange[0]:
Prange[0] = p
ObjRange[0] = np.nan
else:
raise ValueError(
"This shouldn't happen: xi {}, phil {}, flagl {}, yi {}, phiv {}, flagv {}, obj {}, flags: {} {} {}".format(
xi,
phil,
flagl,
yi_range,
phiv_min,
flagv_min,
obj,
flag_min,
flag_max,
flag_critical,
)
)
else:
logger.info(
"Estimated minimum pressure produced vapor as a 'liquid' phase: {}, Range {}".format(
p, Prange
)
)
flag_vapor = True
flag_min = True
Prange[0] = p
ObjRange[0] = np.nan
if flag_max or flag_hard_max:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = maxfactor * Prange[0]
if (
(flag_hard_min or flag_min)
and (flag_hard_max or flag_max)
and (p < Prange[0] or p > Prange[1])
):
# if (p < Prange[0] and Prange[0] != Prange[1]) or (flag_max and p > Prange[1]):
p = (Prange[1] - Prange[0]) / 1 + Prange[0]
if p <= 0.0:
raise ValueError(
"Pressure, {}, cannot be equal to or less than zero. Given composition, {}, and T {}".format(
p, xi, T
)
)
if flag_hard_min and Pmin == p:
raise ValueError(
"In searching for the minimum pressure, the range {}, converged without a solution".format(
Prange
)
)
if z == maxiter - 1:
raise ValueError(
"Maximum Number of Iterations Reached: Proper minimum pressure for liquid density could not be found"
)
# A flag value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means we should assume ideal gas
#################### Find Maximum Pressure and Objective Function Value ###############
# Be sure guess in upper bound is larger than lower bound
if Prange[1] <= Prange[0]:
Prange[1] = Prange[0] * maxfactor
ObjRange[1] == 0.0
flag_min = (
False
) # Signals that the objective value starts to increase again and we must go back
p = Prange[1]
Parray = [Prange[1]]
ObjArray = [ObjRange[1]]
for z in range(maxiter):
# Liquid properties
phil, rhol, flagl = calc_liquid_fugacity_coefficient(
p, T, xi, Eos, density_opts=density_opts
)
if any(np.isnan(phil)):
logger.info(
"Liquid fugacity coefficient should not be NaN, pressure could be too high."
)
flag_max = True
Prange[1] = p
ObjRange[1] = obj
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
continue
# Calculate vapor phase properties and obj value
yi_range, phiv_max, flagv_max = calc_vapor_composition(
yi_range,
xi,
phil,
p,
T,
Eos,
density_opts=density_opts,
**mole_fraction_options
)
obj = equilibrium_objective(xi, phil, phiv_max, phase="vapor")
# If 'vapor' phase is a liquid
if flagv_max not in [0, 2, 4] or np.any(np.isnan(yi_range)):
logger.info(
"New Maximum Pressure: {} isn't vapor, flag={}, Obj Func: {}, Range {}".format(
p, flagv_max, obj, Prange
)
)
if flag_critical: # looking for critical fluid
Prange[0] = p
ObjRange[0] = obj
if flag_hard_max:
p = (Prange[1] - Prange[0]) / 2 + Prange[0]
else:
p = p * maxfactor
if p > Prange[1]:
Prange[1] = p
ObjRange[1] = np.nan
else: # Looking for vapor phase
flag_max = True
Prange[1] = p
ObjRange[1] = obj
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
# If 'liquid' composition is reproduced
elif np.sum(np.abs(xi - yi_range) / xi) < xytol: # If less than 2%
logger.info(
"Estimated Maximum Pressure Reproduces xi: {}, Obj. Func: {}".format(
p, obj
)
)
flag_max = True
ObjRange[1] = obj
Prange[1] = p
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
# Suitable objective value found
elif obj < 0:
logger.info(
"New Max Pressure: {}, flag={}, Obj Func: {}, Range {}".format(
p, flagv_max, obj, Prange
)
)
if Prange[1] < p:
Prange[0] = Prange[1]
ObjRange[0] = ObjRange[1]
Prange[1] = p
ObjRange[1] = obj
logger.info("Got the pressure range!")
slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
intercept = ObjRange[1] - slope * Prange[1]
Pguess = -intercept / slope
flag_min = False
break
else:
Parray.append(p)
ObjArray.append(obj)
# In an objective value "well"
if (z > 0 and ObjArray[-1] > 1.1 * ObjArray[-2]) or flag_min:
if not flag_min:
flag_min = True
Prange[1] = p
ObjRange[1] = obj
logger.info(
"Maximum Pressure (if it exists) between Pressure: {} and Obj Range: {}".format(
Prange, ObjRange
)
)
P0 = np.mean(Prange)
scale_factor = 10 ** (np.ceil(np.log10(P0)))
args = (xi, T, Eos, density_opts, mole_fraction_options, scale_factor)
p = gtb.solve_root(
lambda x, xi, T, Eos, density_opts, mole_fraction_options, scale_factor: objective_bubble_pressure(
x * scale_factor,
xi,
T,
Eos,
density_opts,
mole_fraction_options,
),
args=args,
x0=P0 / scale_factor,
method="TNC",
bounds=Prange / scale_factor,
)
p = p[0] * scale_factor
obj = objective_bubble_pressure(
p,
xi,
T,
Eos,
density_opts=density_opts,
mole_fraction_options=mole_fraction_options,
)
logger.info(
"New Max Pressure: {}, Obj Func: {}, Range {}".format(
p, obj, Prange
)
)
if p < 0:
parray = np.linspace(Prange[0], Prange[1], 20)
obj_array = []
for ptmp in parray:
obj_tmp = objective_dew_pressure(
ptmp,
yi,
T,
Eos,
density_opts=density_opts,
mole_fraction_options=mole_fraction_options,
)
obj_array.append(obj_tmp)
spline = interpolate.Akima1DInterpolator(parray, obj_array)
p_min = spline.derivative().roots()
if len(p_min) > 1:
obj_tmp = []
for p_min_tmp in p_min:
obj_tmp.append(
objective_bubble_pressure(
p_min_tmp, xi, T, Eos, density_opts=density_opts
)
)
p_min = p_min[obj_tmp == np.nanmin(obj_tmp)]
elif len(p_min) == 0:
logger.error(
"Could not find minimum in pressure range:\n Pressure: {}\n Obj Value: {}".format(
parray, obj_array
)
)
p = p_min
obj = objective_bubble_pressure(
p, xi, T, Eos, density_opts=density_opts
)
logger.info(
"New Max Pressure: {}, Obj Func: {}, Range {}".format(
p, obj, Prange
)
)
if obj > 0:
Prange[1] = p
ObjRange[1] = obj
logger.info("Got the pressure range!")
slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
intercept = ObjRange[1] - slope * Prange[1]
Pguess = -intercept / slope
flag_min = False
else:
logger.error(
"Could not find maximum in pressure range:\n Pressure range {} best {}\n Obj Value range {} best {}".format(
Prange, p, ObjRange, obj
)
)
break
elif flag_max:
logger.info(
"New Minimum Pressure: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
Prange[0] = p
ObjRange[0] = obj
p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
else:
logger.info(
"New Maximum Pressure: {}, Obj. Func: {}, Range {}".format(
p, obj, Prange
)
)
if not flag_hard_max:
if Prange[1] < p:
Prange[0] = Prange[1]
ObjRange[0] = ObjRange[1]
Prange[1] = p
ObjRange[1] = obj
slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
intercept = ObjRange[1] - slope * Prange[1]
if flag_hard_max:
p = (Prange[1] - Prange[0]) * np.random.rand(1)[0] + Prange[0]
else:
p = np.nanmax([-intercept / slope, maxfactor * Prange[1]])
if p <= 0.0:
raise ValueError(
"Pressure, {}, cannot be equal to or less than zero. Given composition, {}, and T {}".format(
p, xi, T
)
)
if np.abs(Prange[1] - Prange[0]) < ptol:
raise ValueError(
"In searching for the minimum pressure, the range {}, converged without a solution".format(
Prange
)
)
if z == maxiter - 1 or flag_min:
if flag_min:
logger.error(
"Cannot reach objective value of zero. Final Pressure: {}, Obj. Func: {}".format(
p, obj
)
)
else:
logger.error(
"Maximum Number of Iterations Reached: A change in sign for the objective function could not be found, inspect progress"
)
Prange = np.array([np.nan, np.nan])
Pguess = np.nan
else:
logger.info(
"[Pmin, Pmax]: {}, Obj. Values: {}".format(str(Prange), str(ObjRange))
)
logger.info("Initial guess in pressure: {} Pa".format(Pguess))
_yi_global = yi_range
return Prange, Pguess
def calc_Prange_yi(
    T,
    xi,
    yi,
    Eos,
    density_opts={},
    mole_fraction_options={},
    Pmin=None,
    Pmax=None,
    Pmin_allowed=100,
    maxiter=200,
    ptol=1e-2,
    xytol=0.01,
    maxfactor=2,
    minfactor=0.5,
    **kwargs
):
    r"""
    Obtain min and max pressure values.

    The vapor mole fraction is set and the objective function at each of those values is of opposite sign.

    Parameters
    ----------
    T : float
        Temperature of the system [K]
    xi : numpy.ndarray
        Liquid mole fraction of each component, sum(xi) should equal 1.0
    yi : numpy.ndarray
        Vapor mole fraction of each component, sum(xi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
    maxiter : float, Optional, default=200
        Maximum number of iterations in both the loop to find Pmin and the loop to find Pmax
    Pmin : float, Optional, default=1000.0
        [Pa] Minimum pressure in pressure range that restricts searched space. Used if local minimum isn't available for pressure curve for vapor composition.
    Pmax : float, Optional, default=100000
        If no local minima or maxima are identified for the liquid composition at this temperature, this value is used as an initial estimate of the maximum pressure range.
    Pmin_allowed : float, Optional, default=100
        Minimum allowed pressure in search, before looking for a super critical fluid
    mole_fraction_options : dict, Optional, default={}
        Options used to solve the inner loop in the solving algorithm
    ptol : float, Optional, default=1e-2
        If two iterations in the search for the maximum pressure are within this tolerance, the search is discontinued
    xytol : float, Optional, default=0.01
        If the sum of absolute relative difference between the vapor and liquid mole fractions are less than this total, the pressure is assumed to be super critical and the maximum pressure is sought at a lower value.
    maxfactor : float, Optional, default=2
        Factor to multiply by the pressure if it is too low (produces liquid or positive objective value). Not used if an unfeasible maximum pressure is found to bound the problem (critical for NaN result).
    minfactor : float, Optional, default=0.5
        Factor to multiply by the minimum pressure if it is too high (produces critical value).

    Returns
    -------
    Prange : list
        List of min and max pressure range
    Pguess : float
        An interpolated guess in the equilibrium pressure from Prange
    """

    if len(kwargs) > 0:
        logger.debug(
            "'calc_Prange_yi' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )

    global _xi_global

    # Guess a range from Pmin to the local max of the liquid curve
    vlist, Plist = pressure_vs_volume_arrays(T, yi, Eos, **density_opts)
    Pvspline, roots, extrema = pressure_vs_volume_spline(vlist, Plist)

    # Calculation the highest pressure possible
    # A user-supplied bound is a "hard" bound; otherwise estimate from the
    # extrema of the pressure-volume curve for the vapor composition.
    flag_hard_min = False
    if Pmin is not None:  # FIX: was `!= None`; identity comparison is correct for None
        flag_hard_min = True
        if gtb.isiterable(Pmin):
            Pmin = Pmin[0]
    elif len(extrema):
        Pmin = min(Pvspline(extrema))
        if Pmin < 0:
            Pmin = 1e3
    else:
        Pmin = 1e3

    flag_hard_max = False
    if Pmax is not None:  # FIX: was `!= None`
        flag_hard_max = True
        if gtb.isiterable(Pmax):
            Pmax = Pmax[0]
    elif len(extrema):
        Pmax = max(Pvspline(extrema))
    else:
        Pmax = 1e5
    if Pmax < Pmin:
        Pmax = Pmin * maxfactor

    Prange = np.array([Pmin, Pmax])
    ObjRange = np.zeros(2)
    xi_range = xi

    #################### Find Minimum Pressure and Objective Function Value ###############
    flag_min = False
    flag_max = False
    flag_critical = False
    flag_vapor = False
    p = Prange[0]
    for z in range(maxiter):

        # Vapor properties
        phiv, _, flagv = calc_vapor_fugacity_coefficient(
            p, T, yi, Eos, density_opts=density_opts
        )

        if any(np.isnan(phiv)):
            logger.error("Estimated minimum pressure is too high.")
            flag_max = True
            ObjRange[1] = np.inf
            Prange[1] = p
            if flag_hard_min:
                p = (Prange[1] - Prange[0]) / 2 + Prange[0]
            else:
                p = minfactor * p
                if p < Prange[0]:
                    Prange[0] = p
                    ObjRange[0] = np.nan
            continue

        if flagv in [0, 2, 4]:  # vapor, critical, or ideal-gas root exists

            # Calculate the liquid phase properties
            xi_range, phil_min, flagl_min = calc_liquid_composition(
                xi_range,
                yi,
                phiv,
                p,
                T,
                Eos,
                density_opts=density_opts,
                **mole_fraction_options
            )
            obj = equilibrium_objective(yi, phil_min, phiv, phase="liquid")

            if np.any(np.isnan(xi_range)):
                logger.info("Estimated Minimum Pressure produces NaN")
                flag_max = True
                flag_vapor = True
                Prange[1] = p
                ObjRange[1] = obj
                if flag_hard_min:
                    p = (Prange[1] - Prange[0]) / 2 + Prange[0]
                else:
                    p = p * minfactor

            elif (
                np.sum(np.abs(yi - xi_range) / yi) < xytol and flagl_min == 2
            ):  # If within 2% of liquid mole fraction
                logger.info(
                    "Estimated Minimum Pressure Reproduces yi: {}, Obj. Func: {}, Range {}".format(
                        p, obj, Prange
                    )
                )
                if (
                    flag_critical
                ):  # Couldn't find phase by lowering pressure, now raise it
                    ObjRange[0] = obj
                    Prange[0] = p
                    if flag_hard_max:
                        p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
                    else:
                        p = maxfactor * p
                        if p > Prange[1]:
                            Prange[1] = p
                            ObjRange[1] = np.nan
                else:
                    flag_max = True
                    ObjRange[1] = obj
                    Prange[1] = p
                    phil_max, flagl_max = phil_min, flagl_min
                    if flag_min or flag_hard_min:
                        p = (Prange[1] - Prange[0]) / 2 + Prange[0]
                    else:
                        p = minfactor * p
                if p < Pmin_allowed:  # Less than a kPa and can't find phase, go up
                    flag_critical = True
                    flag_max = False
                    ObjRange = [np.inf, np.inf]
                    Prange = [Pmin, Pmax]
                    if flag_hard_max:
                        p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
                    else:
                        p = maxfactor * Pmin
                        if p > Prange[1]:
                            Prange[1] = p
                            ObjRange[1] = np.nan

            elif obj < 0:
                # Negative objective bounds the problem from below: done.
                Prange[0] = p
                ObjRange[0] = obj
                logger.info(
                    "Obtained estimated Minimum Pressure: {}, Obj. Func: {}, Range {}".format(
                        p, obj, Prange
                    )
                )
                break
            elif obj > 0:
                flag_max = True
                logger.info(
                    "Estimated Minimum Pressure too High: {}, Obj. Func: {}, Range {}".format(
                        p, obj, Prange
                    )
                )
                ObjRange[1] = obj
                Prange[1] = p
                phil_max, flagl_max = phil_min, flagl_min
                p = (Prange[1] - Prange[0]) * minfactor + Prange[0]

        else:
            logger.info(
                "Estimated Minimum Pressure Produced Liquid instead of Vapor Phase: {}, Range {}".format(
                    p, Prange
                )
            )
            if flag_hard_min and p <= Pmin:
                flag_critical = True
                if flag_max:
                    flag_max = False

            if flag_critical:  # Looking for a super critical fluid
                Prange[0] = p
                # NOTE(review): `obj` is only assigned inside the flagv branch above;
                # if this branch is taken on the very first iteration it raises
                # NameError — confirm intended value (np.nan?) before relying on it.
                ObjRange[0] = obj
                flag_min = True
                if flag_hard_max:
                    p = (Prange[1] - Prange[0]) / 2 + Prange[0]
                else:
                    p = p * maxfactor
                    if p > Prange[1]:
                        Prange[1] = p
                        ObjRange[1] = np.nan
            else:  # Looking for a vapor
                Prange[1] = p
                ObjRange[1] = obj
                flag_max = True
                if flag_min or flag_hard_min:
                    p = (Prange[1] - Prange[0]) / 2 + Prange[0]
                else:
                    p = p * minfactor
                    if p < Prange[0]:
                        Prange[0] = p
                        ObjRange[0] = np.nan

        if Prange[0] > Prange[1]:
            if flag_max and not flag_min and not flag_hard_min:
                Prange[0] = minfactor * Prange[1]
                ObjRange[0] = ObjRange[1]
            elif not flag_hard_max:
                Prange[1] = maxfactor * Prange[0]
                ObjRange[1] = ObjRange[0]
            else:
                raise ValueError("Pmin should never be greater than Pmax")

        if (
            (flag_max or flag_hard_max)
            and (flag_min or flag_hard_min)
            and not Prange[0] <= p <= Prange[1]
        ):
            # Both ends bounded but guess escaped the bracket; pick a random
            # point inside to keep the bisection progressing.
            p = (Prange[1] - Prange[0]) * np.random.rand(1)[0] + Prange[0]

        if flag_hard_min and Pmin == p:
            raise ValueError(
                "In searching for the minimum pressure, the range {}, converged without a solution".format(
                    Prange
                )
            )

        if p <= 0.0:
            raise ValueError(
                "Pressure, {}, cannot be equal to or less than zero. Given composition, {}, and T {}, results in a supercritical value without a coexistent fluid.".format(
                    p, xi, T
                )
            )

        if z == maxiter - 1:
            raise ValueError(
                "Maximum Number of Iterations Reached: Proper minimum pressure for liquid density could not be found"
            )

    # Be sure guess in pressure is larger than lower bound
    if Prange[1] <= Prange[0]:
        Prange[1] = Prange[0] * 1.1
        if z == 0:
            # FIX: was `ObjRange[1] == 0.0`, a comparison used as a statement
            # (a no-op); the intent is to reset the upper objective value.
            ObjRange[1] = 0.0

    ## Check Pmax
    flag_sol = False
    flag_vapor = False
    flag_min = False
    p = Prange[1]
    Parray = [Prange[1]]
    ObjArray = [ObjRange[1]]
    for z in range(maxiter):

        # Calculate objective value
        phiv, _, flagv = calc_vapor_fugacity_coefficient(
            p, T, yi, Eos, density_opts=density_opts
        )
        xi_range, phil, flagl = calc_liquid_composition(
            xi_range,
            yi,
            phiv,
            p,
            T,
            Eos,
            density_opts=density_opts,
            **mole_fraction_options
        )
        obj = equilibrium_objective(yi, phil, phiv, phase="liquid")

        if z == 0:
            ObjRange[1] = obj

        if flagv not in [0, 2, 4]:  # Ensure vapor is produced
            flag_vapor = True
            Prange[1] = p
            ObjRange[1] = obj
            logger.info(
                "New Max Pressure: {} doesn't produce vapor, flag={}, Obj Func: {}, Range {}".format(
                    Prange[1], flagv, ObjRange[1], Prange
                )
            )
            p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]

        elif obj > 0:  # Check pressure range
            if Prange[1] < p:
                Prange[0] = Prange[1]
                ObjRange[0] = ObjRange[1]
            Prange[1] = p
            ObjRange[1] = obj
            logger.info(
                "New Max Pressure: {}, flag={}, Obj Func: {}, Range {}".format(
                    Prange[1], flagv, ObjRange[1], Prange
                )
            )
            logger.info("Got the pressure range!")
            # Sign change bracketed: linear interpolation for the root.
            slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
            intercept = ObjRange[1] - slope * Prange[1]
            Pguess = -intercept / slope
            flag_sol = True
            flag_min = False
            break

        elif flag_vapor:
            Prange[0] = p
            ObjRange[0] = obj
            p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
            logger.info(
                "New Max Pressure: {}, Obj. Func: {}, Range {}".format(
                    Prange[0], ObjRange[0], Prange
                )
            )
        else:
            Parray.append(p)
            ObjArray.append(obj)

            # In an objective value "well"
            if (z > 0 and ObjArray[-1] < 1.1 * ObjArray[-2]) or flag_min:

                if not flag_min:
                    flag_min = True
                    Prange[1] = p
                    ObjRange[1] = obj
                    logger.info(
                        "Maximum Pressure (if it exists) between Pressure: {} and Obj Range: {}".format(
                            Prange, ObjRange
                        )
                    )
                P0 = np.mean(Prange)
                # Rescale pressure to O(1) so the gradient-based solver behaves.
                scale_factor = 10 ** (np.ceil(np.log10(P0)))
                args = (yi, T, Eos, density_opts, mole_fraction_options, scale_factor)
                p = gtb.solve_root(
                    lambda x, yi, T, Eos, density_opts, mole_fraction_options, scale_factor: -objective_dew_pressure(
                        x * scale_factor,
                        yi,
                        T,
                        Eos,
                        density_opts,
                        mole_fraction_options,
                    ),
                    args=args,
                    x0=P0 / scale_factor,
                    method="TNC",
                    bounds=Prange / scale_factor,
                )
                p = p[0] * scale_factor
                obj = objective_dew_pressure(
                    p,
                    yi,
                    T,
                    Eos,
                    density_opts=density_opts,
                    mole_fraction_options=mole_fraction_options,
                )
                logger.info(
                    "New Max Pressure: {}, Obj Func: {}, Range {}".format(
                        p, obj, Prange
                    )
                )
                if p < 0:
                    parray = np.linspace(Prange[0], Prange[1], 20)
                    obj_array = []
                    for ptmp in parray:
                        obj_tmp = objective_dew_pressure(
                            ptmp,
                            yi,
                            T,
                            Eos,
                            density_opts=density_opts,
                            mole_fraction_options=mole_fraction_options,
                        )
                        obj_array.append(obj_tmp)
                    spline = interpolate.Akima1DInterpolator(parray, obj_array)
                    p_min = spline.derivative().roots()
                    if len(p_min) > 1:
                        obj_tmp = []
                        for p_min_tmp in p_min:
                            # NOTE(review): the spline above was built from
                            # objective_dew_pressure(yi), yet the roots are ranked
                            # with objective_bubble_pressure(xi) — this looks like a
                            # copy-paste from calc_Prange_xi; confirm intent.
                            obj_tmp.append(
                                objective_bubble_pressure(
                                    p_min_tmp, xi, T, Eos, density_opts=density_opts
                                )
                            )
                        p_min = p_min[obj_tmp == np.nanmin(obj_tmp)]
                    elif len(p_min) == 0:
                        logger.error(
                            "Could not find minimum in pressure range:\n Pressure: {}\n Obj Value: {}".format(
                                parray, obj_array
                            )
                        )
                    p = p_min
                    obj = objective_bubble_pressure(
                        p, xi, T, Eos, density_opts=density_opts
                    )
                    logger.info(
                        "New Max Pressure: {}, Obj Func: {}, Range {}".format(
                            p, obj, Prange
                        )
                    )

                if obj > 0:
                    Prange[1] = p
                    ObjRange[1] = obj
                    logger.info("Got the pressure range!")
                    slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
                    intercept = ObjRange[1] - slope * Prange[1]
                    Pguess = -intercept / slope
                    flag_min = False
                else:
                    logger.error(
                        "Could not find maximum in pressure range:\n Pressure range {} best {}\n Obj Value range {} best {}".format(
                            Prange, p, ObjRange, obj
                        )
                    )
                break

            elif flag_hard_max:
                logger.info(
                    "New Minimum Pressure: {}, Obj. Func: {}, Range {}".format(
                        p, obj, Prange
                    )
                )
                Prange[0] = p
                ObjRange[0] = obj
                p = (Prange[1] - Prange[0]) / 2.0 + Prange[0]
            else:
                logger.info(
                    "New Maximum Pressure: {}, Obj. Func: {}, Range {}".format(
                        p, obj, Prange
                    )
                )
                if not flag_hard_max:
                    if Prange[1] < p:
                        Prange[0] = Prange[1]
                        ObjRange[0] = ObjRange[1]
                    Prange[1] = p
                    ObjRange[1] = obj
                slope = (ObjRange[1] - ObjRange[0]) / (Prange[1] - Prange[0])
                intercept = ObjRange[1] - slope * Prange[1]
                p = np.nanmax([-intercept / slope, maxfactor * Prange[1]])

    if z == maxiter - 1 or flag_min:
        if flag_min:
            logger.error(
                "Cannot reach objective value of zero. Final Pressure: {}, Obj. Func: {}".format(
                    p, obj
                )
            )
        else:
            logger.error(
                "Maximum Number of Iterations Reached: A change in sign for the objective function could not be found, inspect progress"
            )
        Prange = np.array([np.nan, np.nan])
        Pguess = np.nan
    elif flag_sol:
        logger.info(
            "[Pmin, Pmax]: {}, Obj. Values: {}".format(str(Prange), str(ObjRange))
        )
        logger.info("Initial guess in pressure: {} Pa".format(Pguess))
    else:
        logger.error(
            "Maximum Number of Iterations Reached: A change in sign for the objective function could not be found, inspect progress"
        )

    _xi_global = xi_range

    return Prange, Pguess
def calc_vapor_composition(
    yi,
    xi,
    phil,
    P,
    T,
    Eos,
    density_opts={},
    maxiter=50,
    tol=1e-6,
    tol_trivial=0.05,
    **kwargs
):
    r"""
    Find vapor mole fraction given pressure, liquid mole fraction, and temperature.

    Objective function is the sum of the predicted "mole numbers" predicted by the computed fugacity coefficients. Note that by "mole number" we mean that the prediction will only sum to one when the correct pressure is chosen in the outer loop. In this inner loop, we seek to find a mole fraction that is converged to reproduce itself in a prediction. If it hasn't, the new "mole numbers" are normalized into mole fractions and used as the next guess.

    In the case that a guess doesn't produce a gas or critical fluid, we use another function to produce a new guess.

    Parameters
    ----------
    yi : numpy.ndarray
        Guess in vapor mole fraction of each component, sum(xi) should equal 1.0
    xi : numpy.ndarray
        Liquid mole fraction of each component, sum(xi) should equal 1.0
    phil : float
        Fugacity coefficient of liquid at system pressure
    P : float
        [Pa] Pressure of the system
    T : float
        [K] Temperature of the system
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
    maxiter : int, Optional, default=50
        Maximum number of iteration for both the outer pressure and inner vapor mole fraction loops
    tol : float, Optional, default=1e-6
        Tolerance in sum of predicted yi "mole numbers"
    tol_trivial : float, Optional, default=0.05
        If the vapor and liquid mole fractions are within this tolerance, search for a different composition
    kwargs : NA, Optional
        Other other keyword arguments for :func:`~despasito.thermodynamics.calc.find_new_yi`

    Returns
    -------
    yi : numpy.ndarray
        Vapor mole fraction of each component, sum(xi) should equal 1.0
    phiv : float
        Fugacity coefficient of vapor at system pressure
    flag : int
        Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed
    """

    if np.any(np.isnan(phil)):
        raise ValueError(
            "Cannot obtain vapor mole fraction with fugacity coefficients of NaN"
        )

    global _yi_global

    yi_total = [np.sum(yi)]
    yi /= np.sum(yi)
    flag_check_vapor = True  # Make sure we only search for vapor compositions once
    flag_trivial_sol = (
        True
    )  # Make sure we only try to find alternative to trivial solution once
    logger.info(" Solve yi: P {}, T {}, xi {}, phil {}".format(P, T, xi, phil))

    for z in range(maxiter):

        yi_tmp = yi / np.sum(yi)

        # Try yi
        phiv, _, flagv = calc_vapor_fugacity_coefficient(
            P, T, yi_tmp, Eos, density_opts=density_opts
        )

        if (
            any(np.isnan(phiv)) or flagv == 1
        ) and flag_check_vapor:  # If vapor density doesn't exist
            flag_check_vapor = False
            if all(yi_tmp != 0.0) and len(yi_tmp) == 2:
                logger.debug(" Composition doesn't produce a vapor, let's find one!")
                yi_tmp = find_new_yi(
                    P, T, phil, xi, Eos, density_opts=density_opts, **kwargs
                )
                flag_trivial_sol = False
                if np.any(np.isnan(yi_tmp)):
                    phiv, _, flagv = [np.nan, np.nan, 3]
                    yinew = yi_tmp
                    break
                else:
                    phiv, _, flagv = calc_vapor_fugacity_coefficient(
                        P, T, yi_tmp, Eos, density_opts=density_opts
                    )
                    yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor")
            else:
                logger.debug(
                    " Composition doesn't produce a vapor, we need a function to search compositions for more than two components."
                )
                yinew = yi

        elif np.sum(np.abs(xi - yi_tmp) / xi) < tol_trivial and flag_trivial_sol:
            flag_trivial_sol = False
            if all(yi_tmp != 0.0) and len(yi_tmp) == 2:
                logger.debug(
                    " Composition produces trivial solution, let's find a different one!"
                )
                yi_tmp = find_new_yi(
                    P, T, phil, xi, Eos, density_opts=density_opts, **kwargs
                )
                flag_check_vapor = False
            else:
                logger.debug(
                    " Composition produces trivial solution, using random guess to reset"
                )
                yi_tmp = np.random.rand(len(yi_tmp))
                yi_tmp /= np.sum(yi_tmp)

            if np.any(np.isnan(yi_tmp)):
                phiv, _, flagv = [np.nan, np.nan, 3]
                yinew = yi_tmp
                break
            else:
                phiv, _, flagv = calc_vapor_fugacity_coefficient(
                    P, T, yi_tmp, Eos, density_opts=density_opts
                )
                yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor")
        else:
            yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor")

        yinew[np.isnan(yinew)] = 0.0
        yi2 = yinew / np.sum(yinew)
        phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(
            P, T, yi2, Eos, density_opts=density_opts
        )

        if any(np.isnan(phiv)):
            phiv = np.nan
            logger.error(
                "Fugacity coefficient of vapor should not be NaN, pressure could be too high."
            )

        # Check for bouncing between values
        if len(yi_total) > 3:
            tmp1 = np.abs(np.sum(yinew) - yi_total[-2]) + np.abs(
                yi_total[-1] - yi_total[-3]
            )
            if tmp1 < np.abs(np.sum(yinew) - yi_total[-1]) and flagv != flagv2:
                logger.debug(
                    " Composition bouncing between values, let's find the answer!"
                )
                bounds = np.sort([yi_tmp[0], yi2[0]])
                yi2, obj = bracket_bounding_yi(
                    P, T, phil, xi, Eos, bounds=bounds, density_opts=density_opts
                )
                phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(
                    P, T, yi2, Eos, density_opts=density_opts
                )
                _yi_global = yi2
                logger.info(
                    " Inner Loop Final (from bracketing bouncing values) yi: {}, Final Error on Smallest Fraction: {}".format(
                        yi2, obj
                    )
                )
                break

        logger.debug(
            " yi guess {}, yi calc {}, phiv {}, flag {}".format(
                yi_tmp, yinew, phiv, flagv
            )
        )
        logger.debug(
            " Old yi_total: {}, New yi_total: {}, Change: {}".format(
                yi_total[-1], np.sum(yinew), np.sum(yinew) - yi_total[-1]
            )
        )

        # Check convergence
        if abs(np.sum(yinew) - yi_total[-1]) < tol:
            ind_tmp = np.where(yi_tmp == min(yi_tmp[yi_tmp > 0]))[0]
            if np.abs(yi2[ind_tmp] - yi_tmp[ind_tmp]) / yi_tmp[ind_tmp] < tol:
                _yi_global = yi2
                logger.info(
                    " Inner Loop Final yi: {}, Final Error on Smallest Fraction: {}%".format(
                        yi2,
                        np.abs(yi2[ind_tmp] - yi_tmp[ind_tmp]) / yi_tmp[ind_tmp] * 100,
                    )
                )
                break

        if z < maxiter - 1:
            yi_total.append(np.sum(yinew))
            yi = yinew

    ## If yi wasn't found in defined number of iterations
    ind_tmp = np.where(yi_tmp == min(yi_tmp[yi_tmp > 0.0]))[0]
    if flagv == 3:
        yi2 = yinew / np.sum(yinew)
        # FIX: message read "Could not converged mole fraction"; corrected
        # grammar to match the identically-purposed message below.
        logger.info(" Could not converge mole fraction")
        phiv2 = np.full(len(yi_tmp), np.nan)
        # NOTE(review): every other failure path sets the integer flag 3 here;
        # np.nan breaks the documented int flag convention — confirm intent.
        flagv2 = np.nan
    elif z == maxiter - 1:
        # NOTE(review): this branch also fires when convergence happened exactly
        # on the last iteration (break leaves z == maxiter - 1) — confirm.
        yi2 = yinew / np.sum(yinew)
        tmp = np.abs(yi2[ind_tmp] - yi_tmp[ind_tmp]) / yi_tmp[ind_tmp]
        logger.warning(
            " More than {} iterations needed. Error in Smallest Fraction: {}%".format(
                maxiter, tmp * 100
            )
        )
        if tmp > 0.1:  # If difference is greater than 10%
            yinew = find_new_yi(
                P, T, phil, xi, Eos, density_opts=density_opts, **kwargs
            )
            yi2 = yinew / np.sum(yinew)
            y1 = spo.least_squares(
                objective_find_yi,
                yi2[0],
                bounds=(0.0, 1.0),
                args=(P, T, phil, xi, Eos, density_opts),
            )
            yi = y1.x[0]
            yi2 = np.array([yi, 1 - yi])
            phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(
                P, T, yi2, Eos, density_opts=density_opts
            )
            obj = objective_find_yi(yi2, P, T, phil, xi, Eos, density_opts=density_opts)
            logger.warning(
                " Find yi with root algorithm, yi {}, obj {}".format(yi2, obj)
            )
            if obj > tol:
                logger.error("Could not converge mole fraction")
                phiv2 = np.full(len(yi_tmp), np.nan)
                flagv2 = 3

    return yi2, phiv2, flagv2
def calc_liquid_composition(
    xi,
    yi,
    phiv,
    P,
    T,
    Eos,
    density_opts={},
    maxiter=20,
    tol=1e-6,
    tol_trivial=0.05,
    **kwargs
):
    r"""
    Find liquid mole fraction given pressure, vapor mole fraction, and temperature.

    Objective function is the sum of the predicted "mole numbers" predicted by the computed fugacity coefficients. Note that by "mole number" we mean that the prediction will only sum to one when the correct pressure is chosen in the outer loop. In this inner loop, we seek to find a mole fraction that is converged to reproduce itself in a prediction. If it hasn't, the new "mole numbers" are normalized into mole fractions and used as the next guess.

    In the case that a guess doesn't produce a liquid or critical fluid, we use another function to produce a new guess.

    Parameters
    ----------
    xi : numpy.ndarray
        Guess in liquid mole fraction of each component, sum(xi) should equal 1.0
    yi : numpy.ndarray
        Vapor mole fraction of each component, sum(xi) should equal 1.0
    phiv : float
        Fugacity coefficient of liquid at system pressure
    P : float
        [Pa] Pressure of the system
    T : float
        [K] Temperature of the system
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
    maxiter : int, Optional, default=20
        Maximum number of iteration for both the outer pressure and inner vapor mole fraction loops
    tol : float, Optional, default=1e-6
        Tolerance in sum of predicted xi "mole numbers"
    tol_trivial : float, Optional, default=0.05
        If the vapor and liquid mole fractions are within this tolerance, search for a different composition
    kwargs : dict, Optional
        Optional keywords for :func:`~despasito.thermodynamics.calc.find_new_xi`

    Returns
    -------
    xi : numpy.ndarray
        Liquid mole fraction of each component, sum(xi) should equal 1.0
    phil : float
        Fugacity coefficient of liquid at system pressure
    flag : int
        Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true
    """

    global _xi_global

    if np.any(np.isnan(phiv)):
        raise ValueError(
            "Cannot obtain liquid mole fraction with fugacity coefficients of NaN"
        )

    xi /= np.sum(xi)
    xi_total = [np.sum(xi)]
    flag_check_liquid = True  # Make sure we only search for liquid compositions once
    flag_trivial_sol = (
        True
    )  # Make sure we only try to find alternative to trivial solution once
    logger.info(" Solve xi: P {}, T {}, yi {}, phiv {}".format(P, T, yi, phiv))

    for z in range(maxiter):

        xi_tmp = xi / np.sum(xi)

        # Try xi
        phil, rhol, flagl = calc_liquid_fugacity_coefficient(
            P, T, xi_tmp, Eos, density_opts=density_opts
        )

        if (any(np.isnan(phil)) or flagl in [0, 4]) and flag_check_liquid:
            flag_check_liquid = False
            if all(xi_tmp != 0.0) and len(xi_tmp) == 2:
                logger.debug(
                    " Composition doesn't produce a liquid, let's find one!"
                )
                xi_tmp = find_new_xi(
                    P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs
                )
                flag_trivial_sol = False
                if np.any(np.isnan(xi_tmp)):
                    phil, rhol, flagl = [np.nan, np.nan, 3]
                    xinew = xi_tmp
                    break
                else:
                    phil, rhol, flagl = calc_liquid_fugacity_coefficient(
                        P, T, xi_tmp, Eos, density_opts=density_opts
                    )
                    xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid")
            else:
                logger.debug(
                    " Composition doesn't produce a liquid, we need a function to search compositions for more than two components."
                )
                xinew = xi

        elif np.sum(np.abs(yi - xi_tmp) / yi) < tol_trivial and flag_trivial_sol:
            flag_trivial_sol = False
            if all(xi_tmp != 0.0) and len(xi_tmp) == 2:
                logger.debug(
                    " Composition produces trivial solution, let's find a different one!"
                )
                xi_tmp = find_new_xi(
                    P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs
                )
                flag_check_liquid = False
            else:
                logger.debug(
                    " Composition produces trivial solution, using random guess to reset"
                )
                xi_tmp = np.random.rand(len(xi_tmp))
                xi_tmp /= np.sum(xi_tmp)

            if np.any(np.isnan(xi_tmp)):
                phil, rhol, flagl = [np.nan, np.nan, 3]
                xinew = xi_tmp
                break
            else:
                phil, rhol, flagl = calc_liquid_fugacity_coefficient(
                    P, T, xi_tmp, Eos, density_opts=density_opts
                )
                xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid")
        else:
            xinew = calc_new_mole_fractions(yi, phil, phiv, phase="liquid")
        xinew[np.isnan(xinew)] = 0.0

        logger.debug(
            " xi guess {}, xi calc {}, phil {}".format(
                xi_tmp, xinew / np.sum(xinew), phil
            )
        )
        logger.debug(
            " Old xi_total: {}, New xi_total: {}, Change: {}".format(
                xi_total[-1], np.sum(xinew), np.sum(xinew) - xi_total[-1]
            )
        )

        # Check convergence
        if abs(np.sum(xinew) - xi_total[-1]) < tol:
            ind_tmp = np.where(xi_tmp == min(xi_tmp[xi_tmp > 0]))[0]
            xi2 = xinew / np.sum(xinew)
            if np.abs(xi2[ind_tmp] - xi_tmp[ind_tmp]) / xi_tmp[ind_tmp] < tol:
                _xi_global = xi2
                logger.info(
                    " Inner Loop Final xi: {}, Final Error on Smallest Fraction: {}%".format(
                        xi2,
                        np.abs(xi2[ind_tmp] - xi_tmp[ind_tmp]) / xi_tmp[ind_tmp] * 100,
                    )
                )
                break

        if z < maxiter - 1:
            xi_total.append(np.sum(xinew))
            xi = xinew

    xi2 = xinew / np.sum(xinew)

    ind_tmp = np.where(xi_tmp == min(xi_tmp[xi_tmp > 0]))[0]
    if z == maxiter - 1:
        tmp = np.abs(xi2[ind_tmp] - xi_tmp[ind_tmp]) / xi_tmp[ind_tmp]
        # FIX: the format string read "{} %%" — `%%` is a printf-style escape
        # with no meaning in str.format, so the log printed a literal double
        # percent. Changed to "{}%" to match calc_vapor_composition.
        logger.warning(
            " More than {} iterations needed. Error in Smallest Fraction: {}%".format(
                maxiter, tmp * 100
            )
        )
        if tmp > 0.1:  # If difference is greater than 10%
            xinew = find_new_xi(
                P, T, phiv, yi, Eos, density_opts=density_opts, **kwargs
            )
            xinew = spo.least_squares(
                objective_find_xi,
                xinew[0],
                bounds=(0.0, 1.0),
                args=(P, T, phiv, yi, Eos, density_opts),
            )
            xi = xinew.x[0]
            xi_tmp = np.array([xi, 1 - xi])
        obj = objective_find_xi(xi_tmp, P, T, phiv, yi, Eos, density_opts=density_opts)
        logger.warning(
            " Find xi with root algorithm, xi {}, obj {}".format(xi_tmp, obj)
        )

    return xi_tmp, phil, flagl
def find_new_yi(
    P, T, phil, xi, Eos, bounds=(0.01, 0.99), npoints=30, density_opts={}, **kwargs
):
    r"""
    Search vapor mole fraction combinations for a new estimate that produces a vapor density.

    The objective function is sampled on a grid of binary compositions, an Akima
    spline is fit through the valid points, and the spline's minima are screened
    to discard the trivial (yi == xi) root and liquid-like roots before the best
    candidate is returned.

    Parameters
    ----------
    P : float
        [Pa] Pressure of the system
    T : float
        [K] Temperature of the system
    phil : float
        Fugacity coefficient of liquid at system pressure
    xi : numpy.ndarray
        Liquid mole fraction of each component, sum(xi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    bounds : tuple, Optional, default=(0.01, 0.99)
        These bounds dictate the lower and upper boundary for the first component in a binary system.
    npoints : float, Optional, default=30
        Number of points to test between the bounds.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`

    Returns
    -------
    yi : numpy.ndarray
        Vapor mole fraction of each component, sum(yi) should equal 1.0
    """

    if len(kwargs) > 0:
        logger.debug(
            " 'find_new_yi' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )

    # Scan the first component's vapor mole fraction across the bracket.
    yi_ext = np.linspace(bounds[0], bounds[1], npoints)  # Guess for yi
    obj_ext = np.zeros(len(yi_ext))
    # NOTE(review): flag_ext is filled below but never read afterwards.
    flag_ext = np.zeros(len(yi_ext))

    for i, yi in enumerate(yi_ext):
        yi = np.array([yi, 1 - yi])
        obj, flagv = objective_find_yi(
            yi, P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True
        )
        flag_ext[i] = flagv
        obj_ext[i] = obj

    tmp = np.count_nonzero(~np.isnan(obj_ext))
    logger.debug(" Number of valid mole fractions: {}".format(tmp))
    if tmp == 0:
        # No composition produced a usable objective value; signal failure with NaN.
        yi_final = np.nan
        obj_final = np.nan
    else:
        # Remove any NaN
        obj_tmp = obj_ext[~np.isnan(obj_ext)]
        yi_tmp = yi_ext[~np.isnan(obj_ext)]

        # Fit spline
        spline = interpolate.Akima1DInterpolator(yi_tmp, obj_tmp)
        yi_min = spline.derivative().roots()  # extrema of the fitted spline

        if len(yi_min) > 1:
            # Remove local maxima (keep only roots with positive curvature)
            yi_concav = spline.derivative(nu=2)(yi_min)
            yi_min = [yi_min[i] for i in range(len(yi_min)) if yi_concav[i] > 0.0]
            # Add end points if relevant
            if len(yi_tmp) > 1:
                if obj_tmp[0] < obj_tmp[1]:
                    yi_min.insert(0, yi_tmp[0])
                if obj_tmp[-1] < obj_tmp[-2]:
                    yi_min.append(yi_tmp[-1])
            yi_min = np.array(yi_min)
            ## Remove trivial solution
            # The minimum closest (in relative terms) to the liquid composition
            # is taken to be the trivial yi == xi root and discarded.
            obj_trivial = np.abs(yi_min - xi[0]) / xi[0]
            ind = np.where(obj_trivial == min(obj_trivial))[0][0]
            logger.debug(
                " Found multiple minima: {}, discard {} as trivial solution".format(
                    yi_min, yi_min[ind]
                )
            )
            # Remove liquid roots
            yi_min = np.array([yi_min[ii] for ii in range(len(yi_min)) if ii != ind])
            if len(yi_min) > 1:
                # Re-evaluate the surviving candidates to obtain their flags.
                lyi = len(yi_min)
                obj_tmp2 = np.zeros(lyi)
                flagv_tmp2 = np.zeros(lyi)
                for ii in range(lyi):
                    obj_tmp2[ii], flagv_tmp2[ii] = objective_find_yi(
                        yi_min[ii],
                        P,
                        T,
                        phil,
                        xi,
                        Eos,
                        density_opts=density_opts,
                        return_flag=True,
                    )
                # Prefer candidates whose density flag is not liquid (flag != 1).
                yi_tmp2 = [
                    yi_min[ii] for ii in range(len(yi_min)) if flagv_tmp2[ii] != 1
                ]
                if len(yi_tmp2):
                    obj_tmp2 = [
                        obj_tmp2[ii]
                        for ii in range(len(obj_tmp2))
                        if flagv_tmp2[ii] != 1
                    ]
                    yi_min = [yi_tmp2[np.where(obj_tmp2 == min(obj_tmp2))[0][0]]]
                else:
                    # All candidates look liquid-like; fall back to the lowest objective.
                    yi_min = [yi_min[np.where(obj_tmp2 == min(obj_tmp2))[0][0]]]

        if not len(yi_min):
            # Choose values with lowest objective function
            ind = np.where(np.abs(obj_tmp) == min(np.abs(obj_tmp)))[0][0]
            obj_final = obj_tmp[ind]
            yi_final = yi_tmp[ind]
        else:
            yi_final = yi_min[0]
            obj_final = spline(yi_min[0])

    logger.debug(" Found new guess in yi: {}, Obj: {}".format(yi_final, obj_final))
    if not gtb.isiterable(yi_final):
        yi_final = np.array([yi_final, 1 - yi_final])

    return yi_final
def bracket_bounding_yi(
    P,
    T,
    phil,
    xi,
    Eos,
    bounds=(0.01, 0.99),
    maxiter=50,
    tol=1e-7,
    density_opts={},
    **kwargs
):
    r"""
    Search binary vapor mole fraction combinations for a new estimate that produces a vapor density.

    Bisection-style bracketing of the first component's mole fraction: the two
    bounds must start with different density flags, and the bracket is narrowed
    until it is smaller than ``tol`` or ``maxiter`` iterations are spent.

    Parameters
    ----------
    P : float
        [Pa] Pressure of the system
    T : float
        [K] Temperature of the system
    phil : float
        Fugacity coefficient of liquid at system pressure
    xi : numpy.ndarray
        Liquid mole fraction of each component, sum(xi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    bounds : tuple, Optional, default=(0.01, 0.99)
        These bounds dictate the lower and upper boundary for the first component in a binary system.
    maxiter : int, Optional, default=50
        Maximum number of iterations
    tol : float, Optional, default=1e-7
        Tolerance to quit search for yi
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`

    Returns
    -------
    yi : numpy.ndarray
        Vapor mole fraction of each component, sum(yi) should equal 1.0
    flag : int
        Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed
    """

    if len(kwargs) > 0:
        # Bug fix: this message previously named 'calc_saturation_properties'
        # (copy-paste from another function) instead of this one.
        logger.debug(
            " 'bracket_bounding_yi' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )

    if np.size(bounds) != 2:
        raise ValueError("Given bounds on y1 must be of length two.")

    bounds = np.array(bounds)  # local copy; the caller's sequence is not mutated
    obj_bounds = np.zeros(2)
    flag_bounds = np.zeros(2)
    obj_bounds[0], flag_bounds[0] = objective_find_yi(
        bounds[0], P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True
    )
    obj_bounds[1], flag_bounds[1] = objective_find_yi(
        bounds[1], P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True
    )

    if flag_bounds[0] == flag_bounds[1]:
        # The bracket does not straddle a change in density flag, so bisection
        # cannot make progress; return the upper bound unchanged.
        logger.error(
            " Both mole fractions have flag, {}, continue seeking convergence".format(
                flag_bounds[0]
            )
        )
        y1 = bounds[1]
        flagv = flag_bounds[1]
    else:
        flag_high_vapor = False
        for i in range(maxiter):
            y1 = np.mean(bounds)
            obj, flagv = objective_find_yi(
                y1, P, T, phil, xi, Eos, density_opts=density_opts, return_flag=True
            )

            if not flag_high_vapor:
                # Replace whichever bound shares the midpoint's flag.
                ind = np.where(flag_bounds == flagv)[0][0]
                if flagv == 0 and obj > 1 / tol:
                    # Vapor flag but a huge objective value: treat it as a poor
                    # vapor root and switch to objective-based bound updates.
                    flag_high_vapor = True
                    bounds[0], obj_bounds[0], flag_bounds[0] = (
                        bounds[ind],
                        obj_bounds[ind],
                        flag_bounds[ind],
                    )
                    ind = 1
            else:
                if obj < obj_bounds[0]:
                    ind = 0
                else:
                    ind = 1

            bounds[ind], obj_bounds[ind], flag_bounds[ind] = y1, obj, flagv
            logger.debug(
                " Bouncing mole fraction new bounds: {}, obj: {}, flag: {}".format(
                    bounds, obj_bounds, flag_bounds
                )
            )

            # Check convergence
            if np.abs(bounds[1] - bounds[0]) < tol:
                break

        # Prefer the bound flagged as vapor; otherwise take the lower objective.
        ind_array = np.where(flag_bounds == 0)[0]
        if np.size(ind_array) == 1:
            ind = ind_array[0]
        else:
            ind = np.where(obj_bounds == np.min(obj_bounds))[0][0]

        y1, flagv = bounds[ind], flag_bounds[ind]
        if i == maxiter - 1:
            logger.debug(
                " Bouncing mole fraction, max iterations ended with, y1={}, flagv={}".format(
                    y1, flagv
                )
            )
        else:
            logger.debug(
                " Bouncing mole fractions converged to y1={}, flagv={}".format(
                    y1, flagv
                )
            )

    return np.array([y1, 1 - y1]), flagv
def objective_find_yi(yi, P, T, phil, xi, Eos, density_opts={}, return_flag=False):
    r"""
    Objective function for solving for stable vapor mole fraction.

    Parameters
    ----------
    yi : numpy.ndarray
        Vapor mole fraction of each component, sum(yi) should equal 1.0
    P : float
        [Pa] Pressure of the system
    T : float
        [K] Temperature of the system
    phil : float
        Fugacity coefficient of liquid at system pressure
    xi : numpy.ndarray
        Liquid mole fraction of each component, sum(xi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
    return_flag : bool, Optional, default=False
        If True, the objective value and flagv is returned, otherwise, just the objective value is returned

    Returns
    -------
    obj : numpy.ndarray
        Objective function for solving for vapor mole fractions
    flag : int, Optional
        Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed. Only outputted when `return_flag` is True
    """

    # Normalize scalar / length-1 input into a binary composition array.
    # Uses isinstance (consistent with objective_find_xi) instead of the
    # previous `type(yi) == float` comparison.
    if isinstance(yi, float) or np.size(yi) == 1:
        if gtb.isiterable(yi):
            yi = np.array([yi[0], 1 - yi[0]])
        else:
            yi = np.array([yi, 1 - yi])
    elif isinstance(yi, list):
        yi = np.array(yi)
    # Bug fix: normalize into a fresh array rather than `yi /= sum`, which
    # mutated the caller's ndarray in place.
    yi = yi / np.sum(yi)

    phiv, _, flagv = calc_vapor_fugacity_coefficient(
        P, T, yi, Eos, density_opts=density_opts
    )

    yinew = calc_new_mole_fractions(xi, phil, phiv, phase="vapor")
    yi2 = yinew / np.sum(yinew)

    if np.any(np.isnan(yi2)):
        obj = np.nan
    else:
        # Re-evaluate the fugacity coefficients at the updated composition so
        # the objective measures self-consistency of the K-factor update.
        phiv2, _, flagv2 = calc_vapor_fugacity_coefficient(
            P, T, yi2, Eos, density_opts=density_opts
        )
        obj = np.sum(np.abs(yinew - xi * phil / phiv2))
    logger.debug(
        " Guess yi: {}, calc yi: {}, diff={}, flagv {}".format(yi, yi2, obj, flagv)
    )

    if return_flag:
        return obj, flagv
    else:
        return obj
def find_new_xi(
    P, T, phiv, yi, Eos, density_opts={}, bounds=(0.001, 0.999), npoints=30, **kwargs
):
    r"""
    Search liquid mole fraction combinations for a new estimate that produces a liquid density.

    Liquid-phase counterpart of :func:`find_new_yi`: the objective is sampled on
    a grid of binary compositions, an Akima spline is fit through the valid
    points, and its minima are screened to discard the trivial (xi == yi) root.

    Parameters
    ----------
    P : float
        [Pa] Pressure of the system
    T : float
        [K] Temperature of the system
    phiv : float
        Fugacity coefficient of vapor at system pressure
    yi : numpy.ndarray
        Vapor mole fraction of each component, sum(yi) should equal 1.0
    Eos : obj
        An instance of the defined EOS class to be used in thermodynamic computations.
    density_opts : dict, Optional, default={}
        Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
    bounds : tuple, Optional, default=(0.001, 0.999)
        These bounds dictate the lower and upper boundary for the first component in a binary system.
    npoints : float, Optional, default=30
        Number of points to test between the bounds.

    Returns
    -------
    xi : numpy.ndarray
        Liquid mole fraction of each component, sum(xi) should equal 1.0
    """

    if len(kwargs) > 0:
        logger.debug(
            " 'find_new_xi' does not use the following keyword arguments: {}".format(
                ", ".join(list(kwargs.keys()))
            )
        )

    # Scan the first component's liquid mole fraction across the bracket.
    xi_ext = np.linspace(bounds[0], bounds[1], npoints)  # Guess for xi
    obj_ext = np.zeros(len(xi_ext))
    # NOTE(review): flag_ext is filled below but never read afterwards.
    flag_ext = np.zeros(len(xi_ext))

    for i, xi in enumerate(xi_ext):
        xi = np.array([xi, 1 - xi])
        obj, flagl = objective_find_xi(
            xi, P, T, phiv, yi, Eos, density_opts=density_opts, return_flag=True
        )
        flag_ext[i] = flagl
        obj_ext[i] = obj

    tmp = np.count_nonzero(~np.isnan(obj_ext))
    logger.debug(" Number of valid mole fractions: {}".format(tmp))
    if tmp == 0:
        # No composition produced a usable objective value; signal failure with NaN.
        xi_final = np.nan
        obj_final = np.nan
    else:
        # Remove any NaN
        obj_tmp = obj_ext[~np.isnan(obj_ext)]
        xi_tmp = xi_ext[~np.isnan(obj_ext)]

        # Fit an Akima spline through the valid points and locate its extrema.
        spline = interpolate.Akima1DInterpolator(xi_tmp, obj_tmp)
        xi_min = spline.derivative().roots()

        if len(xi_min) > 1:
            # Remove local maxima (keep only roots with positive curvature)
            xi_concav = spline.derivative(nu=2)(xi_min)
            xi_min = [xi_min[i] for i in range(len(xi_min)) if xi_concav[i] > 0.0]
            # Add end points if relevant
            if len(xi_tmp) > 1:
                if obj_tmp[0] < obj_tmp[1]:
                    xi_min.insert(0, xi_tmp[0])
                if obj_tmp[-1] < obj_tmp[-2]:
                    xi_min.append(xi_tmp[-1])
            xi_min = np.array(xi_min)
            # Remove trivial solution
            # The minimum closest (in relative terms) to the vapor composition
            # is taken to be the trivial xi == yi root and discarded.
            obj_trivial = np.abs(xi_min - yi[0]) / yi[0]
            ind = np.where(obj_trivial == min(obj_trivial))[0][0]
            logger.debug(
                " Found multiple minima: {}, discard {} as trivial solution".format(
                    xi_min, xi_min[ind]
                )
            )
            xi_min = np.array([xi_min[ii] for ii in range(len(xi_min)) if ii != ind])

        if not len(xi_min):
            # Choose values with lowest objective function
            ind = np.where(np.abs(obj_tmp) == min(np.abs(obj_tmp)))[0][0]
            obj_final = obj_tmp[ind]
            xi_final = xi_tmp[ind]
        else:
            xi_final = xi_min[0]
            obj_final = spline(xi_min[0])

    logger.debug(" Found new guess in xi: {}, Obj: {}".format(xi_final, obj_final))
    if not gtb.isiterable(xi_final):
        xi_final = np.array([xi_final, 1 - xi_final])

    return xi_final
def objective_find_xi(xi, P, T, phiv, yi, Eos, density_opts={}, return_flag=False):
r"""
Objective function for solving for stable vapor mole fraction.
Parameters
----------
xi : numpy.ndarray
Liquid mole fraction of each component, sum(xi) should equal 1.0
P : float
[Pa] Pressure of the system
T : float
[K] Temperature of the system
phiv : float
Fugacity coefficient of vapor at system pressure
yi : numpy.ndarray
Vapor mole fraction of each component, sum(yi) should equal 1.0
Eos : obj
An instance of the defined EOS class to be used in thermodynamic computations.
density_opts : dict, Optional, default={}
Dictionary of options used in calculating pressure vs. specific volume in :func:`~despasito.thermodynamics.calc.pressure_vs_volume_arrays`
return_flag : bool, Optional, default=False
If True, the objective value and flagl is returned, otherwise, just the objective value is returned
Returns
-------
obj : numpy.ndarray
Objective function for solving for liquid mole fractions
flag : int, Optional
Flag identifying the fluid type. A value of 0 is vapor, 1 is liquid, 2 mean a critical fluid, 3 means that neither is true, 4 means ideal gas is assumed. Only outputted when `return_flag` is True
"""
if isinstance(xi, float) or len(xi) == 1:
if gtb.isiterable(xi):
xi = | np.array([xi[0], 1 - xi[0]]) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 17 18:02:53 2019
@author: sayan
"""
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
import numpy as np
import argparse
def sigmoid_activation(x):
    """Logistic sigmoid, 1 / (1 + e**-x), mapping any real input into (0, 1).

    Bug fix: the original computed 1 / (1 + np.exp(x)), which is sigmoid(-x)
    (a *decreasing* function with sigmoid(0) == 0.5 but inverted tails). The
    exponent sign is corrected so the function is increasing in x, as the
    gradient-descent training loop expects.
    """
    return 1.0 / (1 + np.exp(-x))
import numpy as np
import matplotlib.pyplot as plt
from transforms3d.euler import euler2mat
from mpl_toolkits.mplot3d import Axes3D
class Joint:
    def __init__(self, name, direction, length, axis, dof, limits):
        """
        Definition of basic joint. The joint also contains the information of the
        bone between its parent joint and itself. Refer
        [here](https://research.cs.wisc.edu/graphics/Courses/cs-838-1999/Jeff/ASF-AMC.html)
        for detailed description for asf files.

        Parameters
        ----------
        name: Name of the joint defined in the asf file. There should always be one
            root joint. String.
        direction: Default direction of the joint(bone). The motions are all defined
            based on this default pose.
        length: Length of the bone.
        axis: Axis of rotation for the bone, in degrees.
        dof: Degree of freedom. Specifies the number of motion channels and in what
            order they appear in the AMC file.
        limits: Limits on each of the channels in the dof specification.
        """
        self.name = name
        # Stored as a 3x1 column vector so rotation matrices can left-multiply it.
        self.direction = np.reshape(direction, [3, 1])
        self.length = length
        # ASF axis angles are in degrees; C is the fixed local rotation for this
        # bone and Cinv its inverse, used to conjugate per-frame rotations.
        axis = np.deg2rad(axis)
        self.C = euler2mat(*axis)
        self.Cinv = np.linalg.inv(self.C)
        # Per-channel motion limits: rows are (rx, ry, rz), columns (min, max).
        self.limits = np.zeros([3, 2])
        for lm, nm in zip(limits, dof):
            if nm == 'rx':
                self.limits[0] = lm
            elif nm == 'ry':
                self.limits[1] = lm
            else:
                # NOTE(review): every channel other than 'rx'/'ry' lands in row 2.
                # This assumes dof only contains rotation channels here — confirm
                # translational channels (tx/ty/tz) never reach this constructor.
                self.limits[2] = lm
        # Tree links and per-frame pose, populated later by the parser/set_motion.
        self.parent = None
        self.children = []
        self.coordinate = None
        self.matrix = None
def set_motion(self, motion):
if self.name == 'root':
self.coordinate = np.reshape(np.array(motion['root'][:3]), [3, 1])
rotation = np.deg2rad(motion['root'][3:])
self.matrix = self.C.dot(euler2mat(*rotation)).dot(self.Cinv)
else:
idx = 0
rotation = | np.zeros(3) | numpy.zeros |
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False),
        (int(), False),
        (float(), False),
        (complex(), False),
        (str(), False),
        ("", False),
        (r"", False),
        (object(), False),
        # Base Python types.
        (bool, False),
        (int, False),
        (float, False),
        (complex, False),
        (str, False),
        (object, False),
        # NumPy types.
        (np.bool_, False),
        (np.int_, False),
        (np.float64, False),
        (np.complex128, False),
        (np.str_, False),
        (np.unicode_, False),
        (np.datetime64, False),
        (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), False),
        (np.int_(), False),
        (np.float64(), False),
        (np.complex128(), False),
        (np.str_(), False),
        (np.unicode_(), False),
        (np.datetime64(), False),
        (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), False),
        (np.dtype("int"), False),
        (np.dtype("float"), False),
        (np.dtype("complex"), False),
        (np.dtype("str"), False),
        (np.dtype("unicode"), False),
        (np.dtype("datetime64"), False),
        (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), False),
        (np.array([], dtype=np.int_), False),
        (np.array([], dtype=np.float64), False),
        (np.array([], dtype=np.complex128), False),
        (np.array([], dtype=np.str_), False),
        (np.array([], dtype=np.unicode_), False),
        (np.array([], dtype=np.datetime64), False),
        (np.array([], dtype=np.timedelta64), False),
        (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.core.dtypes.dtypes.CategoricalDtypeType, True),
        (pd.CategoricalDtype, True),
        # Pandas objects.
        (pd.Series(dtype="bool"), False),
        (pd.Series(dtype="int"), False),
        (pd.Series(dtype="float"), False),
        (pd.Series(dtype="complex"), False),
        (pd.Series(dtype="str"), False),
        (pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), False),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), True),
        (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, True),
        (cudf.ListDtype, False),
        (cudf.StructDtype, False),
        (cudf.Decimal128Dtype, False),
        (cudf.Decimal64Dtype, False),
        (cudf.Decimal32Dtype, False),
        (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), True),
        (cudf.ListDtype(int), False),
        (cudf.StructDtype({"a": int}), False),
        (cudf.Decimal128Dtype(5, 2), False),
        (cudf.Decimal64Dtype(5, 2), False),
        (cudf.Decimal32Dtype(5, 2), False),
        (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), False),
        (cudf.Series(dtype="int"), False),
        (cudf.Series(dtype="float"), False),
        (cudf.Series(dtype="str"), False),
        (cudf.Series(dtype="datetime64[s]"), False),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), True),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
        # TODO: Currently creating an empty Series of list type ignores the
        # provided type and instead makes a float64 Series.
        (cudf.Series([[1, 2], [3, 4, 5]]), False),
        # TODO: Currently creating an empty Series of struct type fails because
        # it uses a numpy utility that doesn't understand StructDtype.
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
        (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_categorical_dtype(obj, expect):
    """Only categorical dtypes/objects (pandas or cuDF) should report True."""
    assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False),
        (int(), False),
        (float(), False),
        (complex(), False),
        (str(), False),
        ("", False),
        (r"", False),
        (object(), False),
        # Base Python types.
        (bool, True),
        (int, True),
        (float, True),
        (complex, True),
        (str, False),
        (object, False),
        # NumPy types.
        (np.bool_, True),
        (np.int_, True),
        (np.float64, True),
        (np.complex128, True),
        (np.str_, False),
        (np.unicode_, False),
        (np.datetime64, False),
        (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), True),
        (np.int_(), True),
        (np.float64(), True),
        (np.complex128(), True),
        (np.str_(), False),
        (np.unicode_(), False),
        (np.datetime64(), False),
        (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), True),
        (np.dtype("int"), True),
        (np.dtype("float"), True),
        (np.dtype("complex"), True),
        (np.dtype("str"), False),
        (np.dtype("unicode"), False),
        (np.dtype("datetime64"), False),
        (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), True),
        (np.array([], dtype=np.int_), True),
        (np.array([], dtype=np.float64), True),
        (np.array([], dtype=np.complex128), True),
        (np.array([], dtype=np.str_), False),
        (np.array([], dtype=np.unicode_), False),
        (np.array([], dtype=np.datetime64), False),
        (np.array([], dtype=np.timedelta64), False),
        (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.core.dtypes.dtypes.CategoricalDtypeType, False),
        (pd.CategoricalDtype, False),
        # Pandas objects.
        (pd.Series(dtype="bool"), True),
        (pd.Series(dtype="int"), True),
        (pd.Series(dtype="float"), True),
        (pd.Series(dtype="complex"), True),
        (pd.Series(dtype="str"), False),
        (pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), False),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), False),
        (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, False),
        (cudf.ListDtype, False),
        (cudf.StructDtype, False),
        (cudf.Decimal128Dtype, True),
        (cudf.Decimal64Dtype, True),
        (cudf.Decimal32Dtype, True),
        (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), False),
        (cudf.ListDtype(int), False),
        (cudf.StructDtype({"a": int}), False),
        (cudf.Decimal128Dtype(5, 2), True),
        (cudf.Decimal64Dtype(5, 2), True),
        (cudf.Decimal32Dtype(5, 2), True),
        (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), True),
        (cudf.Series(dtype="int"), True),
        (cudf.Series(dtype="float"), True),
        (cudf.Series(dtype="str"), False),
        (cudf.Series(dtype="datetime64[s]"), False),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), False),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
        (cudf.Series([[1, 2], [3, 4, 5]]), False),
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
        (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_numeric_dtype(obj, expect):
    """Numeric dtypes (bool/int/float/complex and cuDF decimals) report True."""
    assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False),
        (int(), False),
        (float(), False),
        (complex(), False),
        (str(), False),
        ("", False),
        (r"", False),
        (object(), False),
        # Base Python types.
        (bool, False),
        (int, True),
        (float, False),
        (complex, False),
        (str, False),
        (object, False),
        # NumPy types.
        (np.bool_, False),
        (np.int_, True),
        (np.float64, False),
        (np.complex128, False),
        (np.str_, False),
        (np.unicode_, False),
        (np.datetime64, False),
        (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), False),
        (np.int_(), True),
        (np.float64(), False),
        (np.complex128(), False),
        (np.str_(), False),
        (np.unicode_(), False),
        (np.datetime64(), False),
        (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), False),
        (np.dtype("int"), True),
        (np.dtype("float"), False),
        (np.dtype("complex"), False),
        (np.dtype("str"), False),
        (np.dtype("unicode"), False),
        (np.dtype("datetime64"), False),
        (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), False),
        (np.array([], dtype=np.int_), True),
        (np.array([], dtype=np.float64), False),
        (np.array([], dtype=np.complex128), False),
        (np.array([], dtype=np.str_), False),
        (np.array([], dtype=np.unicode_), False),
        (np.array([], dtype=np.datetime64), False),
        (np.array([], dtype=np.timedelta64), False),
        (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.core.dtypes.dtypes.CategoricalDtypeType, False),
        (pd.CategoricalDtype, False),
        # Pandas objects.
        (pd.Series(dtype="bool"), False),
        (pd.Series(dtype="int"), True),
        (pd.Series(dtype="float"), False),
        (pd.Series(dtype="complex"), False),
        (pd.Series(dtype="str"), False),
        (pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), False),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), False),
        (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, False),
        (cudf.ListDtype, False),
        (cudf.StructDtype, False),
        (cudf.Decimal128Dtype, False),
        (cudf.Decimal64Dtype, False),
        (cudf.Decimal32Dtype, False),
        (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), False),
        (cudf.ListDtype(int), False),
        (cudf.StructDtype({"a": int}), False),
        (cudf.Decimal128Dtype(5, 2), False),
        (cudf.Decimal64Dtype(5, 2), False),
        (cudf.Decimal32Dtype(5, 2), False),
        (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), False),
        (cudf.Series(dtype="int"), True),
        (cudf.Series(dtype="float"), False),
        (cudf.Series(dtype="str"), False),
        (cudf.Series(dtype="datetime64[s]"), False),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), False),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
        (cudf.Series([[1, 2], [3, 4, 5]]), False),
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
        (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_integer_dtype(obj, expect):
    """Only integer dtypes report True; bool is deliberately excluded."""
    assert types.is_integer_dtype(obj) == expect
@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False),
        (int(), True),
        (float(), False),
        (complex(), False),
        (str(), False),
        ("", False),
        (r"", False),
        (object(), False),
        # Base Python types.
        (bool, False),
        (int, False),
        (float, False),
        (complex, False),
        (str, False),
        (object, False),
        # NumPy types.
        (np.bool_, False),
        (np.int_, False),
        (np.float64, False),
        (np.complex128, False),
        (np.str_, False),
        (np.unicode_, False),
        (np.datetime64, False),
        (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), False),
        (np.int_(), True),
        (np.float64(), False),
        (np.complex128(), False),
        (np.str_(), False),
        (np.unicode_(), False),
        (np.datetime64(), False),
        (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), False),
        (np.dtype("int"), False),
        (np.dtype("float"), False),
        (np.dtype("complex"), False),
        (np.dtype("str"), False),
        (np.dtype("unicode"), False),
        (np.dtype("datetime64"), False),
        (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), False),
        (np.array([], dtype=np.int_), False),
        (np.array([], dtype=np.float64), False),
        (np.array([], dtype=np.complex128), False),
        (np.array([], dtype=np.str_), False),
        (np.array([], dtype=np.unicode_), False),
        (np.array([], dtype=np.datetime64), False),
        (np.array([], dtype=np.timedelta64), False),
        (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.core.dtypes.dtypes.CategoricalDtypeType, False),
        (pd.CategoricalDtype, False),
        # Pandas objects.
        (pd.Series(dtype="bool"), False),
        (pd.Series(dtype="int"), False),
        (pd.Series(dtype="float"), False),
        (pd.Series(dtype="complex"), False),
        (pd.Series(dtype="str"), False),
        (pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), False),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), False),
        (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, False),
        (cudf.ListDtype, False),
        (cudf.StructDtype, False),
        (cudf.Decimal128Dtype, False),
        (cudf.Decimal64Dtype, False),
        (cudf.Decimal32Dtype, False),
        (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), False),
        (cudf.ListDtype(int), False),
        (cudf.StructDtype({"a": int}), False),
        (cudf.Decimal128Dtype(5, 2), False),
        (cudf.Decimal64Dtype(5, 2), False),
        (cudf.Decimal32Dtype(5, 2), False),
        (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), False),
        (cudf.Series(dtype="int"), False),
        (cudf.Series(dtype="float"), False),
        (cudf.Series(dtype="str"), False),
        (cudf.Series(dtype="datetime64[s]"), False),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), False),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
        (cudf.Series([[1, 2], [3, 4, 5]]), False),
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
        (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_integer(obj, expect):
    """`is_integer` accepts only integer *scalars* (not types, dtypes, arrays, or bools)."""
    assert types.is_integer(obj) == expect
# TODO: Temporarily ignoring all cases of "object" until we decide what to do.
@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False),
        (int(), False),
        (float(), False),
        (complex(), False),
        (str(), False),
        ("", False),
        (r"", False),
        (object(), False),
        # Base Python types.
        (bool, False),
        (int, False),
        (float, False),
        (complex, False),
        (str, True),
        # (object, False),
        # NumPy types.
        (np.bool_, False),
        (np.int_, False),
        (np.float64, False),
        (np.complex128, False),
        (np.str_, True),
        (np.unicode_, True),
        (np.datetime64, False),
        (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), False),
        (np.int_(), False),
        (np.float64(), False),
        (np.complex128(), False),
        (np.str_(), True),
        (np.unicode_(), True),
        (np.datetime64(), False),
        (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), False),
        (np.dtype("int"), False),
        (np.dtype("float"), False),
        (np.dtype("complex"), False),
        (np.dtype("str"), True),
        (np.dtype("unicode"), True),
        (np.dtype("datetime64"), False),
        (np.dtype("timedelta64"), False),
        # (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), False),
        (np.array([], dtype=np.int_), False),
        (np.array([], dtype=np.float64), False),
        (np.array([], dtype=np.complex128), False),
        (np.array([], dtype=np.str_), True),
        (np.array([], dtype=np.unicode_), True),
        (np.array([], dtype=np.datetime64), False),
        (np.array([], dtype=np.timedelta64), False),
        # (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.core.dtypes.dtypes.CategoricalDtypeType, False),
        (pd.CategoricalDtype, False),
        # Pandas objects.
        (pd.Series(dtype="bool"), False),
        (pd.Series(dtype="int"), False),
        (pd.Series(dtype="float"), False),
        (pd.Series(dtype="complex"), False),
        (pd.Series(dtype="str"), True),
        (pd.Series(dtype="unicode"), True),
        (pd.Series(dtype="datetime64[s]"), False),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), False),
        # (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, False),
        (cudf.ListDtype, False),
        (cudf.StructDtype, False),
        (cudf.Decimal128Dtype, False),
        (cudf.Decimal64Dtype, False),
        (cudf.Decimal32Dtype, False),
        (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), False),
        (cudf.ListDtype(int), False),
        (cudf.StructDtype({"a": int}), False),
        (cudf.Decimal128Dtype(5, 2), False),
        (cudf.Decimal64Dtype(5, 2), False),
        (cudf.Decimal32Dtype(5, 2), False),
        (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), False),
        (cudf.Series(dtype="int"), False),
        (cudf.Series(dtype="float"), False),
        (cudf.Series(dtype="str"), True),
        (cudf.Series(dtype="datetime64[s]"), False),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), False),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
        (cudf.Series([[1, 2], [3, 4, 5]]), False),
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
        (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_string_dtype(obj, expect):
    """String dtypes report True; "object" cases are commented out pending a decision."""
    assert types.is_string_dtype(obj) == expect
@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False),
        (int(), False),
        (float(), False),
        (complex(), False),
        (str(), False),
        ("", False),
        (r"", False),
        (object(), False),
        # Base Python types.
        (bool, False),
        (int, False),
        (float, False),
        (complex, False),
        (str, False),
        (object, False),
        # NumPy types.
        (np.bool_, False),
        (np.int_, False),
        (np.float64, False),
        (np.complex128, False),
        (np.str_, False),
        (np.unicode_, False),
        (np.datetime64, True),
        (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), False),
        (np.int_(), False),
        (np.float64(), False),
        (np.complex128(), False),
        (np.str_(), False),
        (np.unicode_(), False),
        (np.datetime64(), True),
        (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), False),
        (np.dtype("int"), False),
        (np.dtype("float"), False),
        (np.dtype("complex"), False),
        (np.dtype("str"), False),
        (np.dtype("unicode"), False),
        (np.dtype("datetime64"), True),
        (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), False),
        (np.array([], dtype=np.int_), False),
        (np.array([], dtype=np.float64), False),
        (np.array([], dtype=np.complex128), False),
        (np.array([], dtype=np.str_), False),
        (np.array([], dtype=np.unicode_), False),
        (np.array([], dtype=np.datetime64), True),
        (np.array([], dtype=np.timedelta64), False),
        (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.core.dtypes.dtypes.CategoricalDtypeType, False),
        (pd.CategoricalDtype, False),
        # Pandas objects.
        (pd.Series(dtype="bool"), False),
        (pd.Series(dtype="int"), False),
        (pd.Series(dtype="float"), False),
        (pd.Series(dtype="complex"), False),
        (pd.Series(dtype="str"), False),
        (pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), True),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), False),
        (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, False),
        (cudf.ListDtype, False),
        (cudf.StructDtype, False),
        (cudf.Decimal128Dtype, False),
        (cudf.Decimal64Dtype, False),
        (cudf.Decimal32Dtype, False),
        (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), False),
        (cudf.ListDtype(int), False),
        (cudf.StructDtype({"a": int}), False),
        (cudf.Decimal128Dtype(5, 2), False),
        (cudf.Decimal64Dtype(5, 2), False),
        (cudf.Decimal32Dtype(5, 2), False),
        (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), False),
        (cudf.Series(dtype="int"), False),
        (cudf.Series(dtype="float"), False),
        (cudf.Series(dtype="str"), False),
        (cudf.Series(dtype="datetime64[s]"), True),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), False),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
        (cudf.Series([[1, 2], [3, 4, 5]]), False),
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
        (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_datetime_dtype(obj, expect):
    """Only datetime64 types/dtypes/objects report True (timedelta64 does not)."""
    assert types.is_datetime_dtype(obj) == expect
@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False),
        (int(), False),
        (float(), False),
        (complex(), False),
        (str(), False),
        ("", False),
        (r"", False),
        (object(), False),
        # Base Python types.
        (bool, False),
        (int, False),
        (float, False),
        (complex, False),
        (str, False),
        (object, False),
        # NumPy types.
        (np.bool_, False),
        (np.int_, False),
        (np.float64, False),
        (np.complex128, False),
        (np.str_, False),
        (np.unicode_, False),
        (np.datetime64, False),
        (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), False),
        (np.int_(), False),
        (np.float64(), False),
        (np.complex128(), False),
        (np.str_(), False),
        (np.unicode_(), False),
        (np.datetime64(), False),
        (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), False),
        (np.dtype("int"), False),
        (np.dtype("float"), False),
        (np.dtype("complex"), False),
        (np.dtype("str"), False),
        (np.dtype("unicode"), False),
        (np.dtype("datetime64"), False),
        (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), False),
        (np.array([], dtype=np.int_), False),
        (np.array([], dtype=np.float64), False),
        (np.array([], dtype=np.complex128), False),
        (np.array([], dtype=np.str_), False),
        (np.array([], dtype=np.unicode_), False),
        (np.array([], dtype=np.datetime64), False),
        (np.array([], dtype=np.timedelta64), False),
        (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.core.dtypes.dtypes.CategoricalDtypeType, False),
        (pd.CategoricalDtype, False),
        # Pandas objects.
        (pd.Series(dtype="bool"), False),
        (pd.Series(dtype="int"), False),
        (pd.Series(dtype="float"), False),
        (pd.Series(dtype="complex"), False),
        (pd.Series(dtype="str"), False),
        (pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), False),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), False),
        (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, False),
        (cudf.ListDtype, True),
        (cudf.StructDtype, False),
        (cudf.Decimal128Dtype, False),
        (cudf.Decimal64Dtype, False),
        (cudf.Decimal32Dtype, False),
        (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), False),
        (cudf.ListDtype(int), True),
        (cudf.StructDtype({"a": int}), False),
        (cudf.Decimal128Dtype(5, 2), False),
        (cudf.Decimal64Dtype(5, 2), False),
        (cudf.Decimal32Dtype(5, 2), False),
        (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), False),
        (cudf.Series(dtype="int"), False),
        (cudf.Series(dtype="float"), False),
        (cudf.Series(dtype="str"), False),
        (cudf.Series(dtype="datetime64[s]"), False),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), False),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
        (cudf.Series([[1, 2], [3, 4, 5]]), True),
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
        (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_list_dtype(obj, expect):
    """Only cuDF list dtypes (class, instance) and list-valued Series are list dtypes."""
    assert types.is_list_dtype(obj) == expect
@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False),
        (int(), False),
        (float(), False),
        (complex(), False),
        (str(), False),
        ("", False),
        (r"", False),
        (object(), False),
        # Base Python types.
        (bool, False),
        (int, False),
        (float, False),
        (complex, False),
        (str, False),
        (object, False),
        # NumPy types.
        (np.bool_, False),
        (np.int_, False),
        (np.float64, False),
        (np.complex128, False),
        (np.str_, False),
        (np.unicode_, False),
        (np.datetime64, False),
        (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), False),
        (np.int_(), False),
        (np.float64(), False),
        (np.complex128(), False),
        (np.str_(), False),
        (np.unicode_(), False),
        (np.datetime64(), False),
        (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), False),
        (np.dtype("int"), False),
        (np.dtype("float"), False),
        (np.dtype("complex"), False),
        (np.dtype("str"), False),
        (np.dtype("unicode"), False),
        (np.dtype("datetime64"), False),
        (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), False),
        (np.array([], dtype=np.int_), False),
        (np.array([], dtype=np.float64), False),
        (np.array([], dtype=np.complex128), False),
        (np.array([], dtype=np.str_), False),
        (np.array([], dtype=np.unicode_), False),
        (np.array([], dtype=np.datetime64), False),
        (np.array([], dtype=np.timedelta64), False),
        (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.core.dtypes.dtypes.CategoricalDtypeType, False),
        (pd.CategoricalDtype, False),
        # Pandas objects.
        (pd.Series(dtype="bool"), False),
        (pd.Series(dtype="int"), False),
        (pd.Series(dtype="float"), False),
        (pd.Series(dtype="complex"), False),
        (pd.Series(dtype="str"), False),
        (pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), False),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), False),
        (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, False),
        (cudf.ListDtype, False),
        (cudf.StructDtype, True),
        (cudf.Decimal128Dtype, False),
        (cudf.Decimal64Dtype, False),
        (cudf.Decimal32Dtype, False),
        # (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), False),
        (cudf.ListDtype(int), False),
        (cudf.StructDtype({"a": int}), True),
        (cudf.Decimal128Dtype(5, 2), False),
        (cudf.Decimal64Dtype(5, 2), False),
        (cudf.Decimal32Dtype(5, 2), False),
        # (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), False),
        (cudf.Series(dtype="int"), False),
        (cudf.Series(dtype="float"), False),
        (cudf.Series(dtype="str"), False),
        (cudf.Series(dtype="datetime64[s]"), False),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), False),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
        (cudf.Series([[1, 2], [3, 4, 5]]), False),
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), True),
        # (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_struct_dtype(obj, expect):
    """Only cuDF struct dtypes (class, instance) and dict-valued Series are struct dtypes."""
    # TODO: All inputs of interval types are currently disabled due to
    # inconsistent behavior of is_struct_dtype for interval types that will be
    # fixed as part of the array refactor.
    assert types.is_struct_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
( | np.dtype("float") | numpy.dtype |
import sys, os, glob, string
import numpy as np
import astropy as ast
import matplotlib.pyplot as plt
from pyraf import iraf
import odi_config as odi
import pandas as pd
from astropy.coordinates import SkyCoord
from astropy import units as u
from collections import OrderedDict
def tpv_remove(img):
    """
    Remove the TPV values from a final stacked image.

    Each OTA has a set of TPV header keywords that define the WCS solution.
    The way the final images are stacked, the TPV values from the last OTA in
    the list, OTA22 for example, are what are inherited by the final image.
    Without removing these values other Python scripts, and other programs
    such as Source Extractor, will not be able to accurately convert an x,y
    position to Ra and Dec.

    Parameters
    ----------
    img : str
        String containing name of the image currently in use.

    Returns
    -------
    img : str
        Name of the new image produced by this function.

    Examples
    --------
    >>> img = 'GCPair-F1_odi_g.fits'
    >>> new_img = tpv_remove(img)
    >>> print new_img
    >>> 'GCPair-F1_odi_g-nopv.fits'
    """
    new_img = img.nofits() + '-nopv.fits'
    # Skip the work entirely if a PV-stripped copy already exists on disk.
    if not os.path.isfile(new_img):
        print('Removing PV keywords from: ', img)
        hdulist = odi.fits.open(img.f)
        try:
            header = hdulist[0].header
            # header['PV*'] is a separate header subset, so removing keys
            # from the primary header while iterating over it is safe.
            pvlist = header['PV*']
            for pv in pvlist:
                header.remove(pv)
            hdulist.writeto(new_img)
        finally:
            # Bug fix: the original never closed the HDU list, leaking the
            # open file handle every time the function did real work.
            hdulist.close()
    return new_img
def trim_img(img,x1,x2,y1,y2):
    """
    Trim a stacked image based on the coordinates given. The image is trimmed
    using ``imcopy`` through pyraf, so the x and y pixel ranges should be given
    in the correct ``imcopy`` format. ``[x1:x2,y1:y2]``

    Parameters
    ---------
    img : str
        String containing name of the image currently in use
    x1 : int
        Pixel coordinate of x1
    x2 : int
        Pixel coordinate of x2
    y1 : int
        Pixel coordinate of y1
    y2 : int
        Pixel coordinate of y2

    Returns
    -------
    img : str
        The new image is given the extension ``.trim.fits``.
    """
    # Bug fix: dropped the no-op reassignments (x1,x2 = x1,x2 etc.) and
    # renamed the local so it no longer shadows the builtin ``input``.
    # IRAF-style image section: name[x1:x2,y1:y2]
    section = img.nofits()+'['+repr(x1)+':'+repr(x2)+','+repr(y1)+':'+repr(y2)+']'
    output = img.nofits()+'.trim.fits'
    # Only trim once; skip if the trimmed product already exists.
    if not os.path.isfile(output):
        print('Trimming image: ' ,img)
        iraf.unlearn(iraf.imcopy)
        iraf.imcopy(input = section,output = output,verbose='no',mode='h')
def full_sdssmatch(img1,img2,inst,gmaglim=19):
    """
    This function requires two stacked images, one each filter that will be used
    in solving the color equations. The purpose of this function is to first
    collect all of the SDSS sources in a given field using the
    ``odi.sdss_coords_full`` function. After collecting a catalog of the SDSS
    sources in each image this function creates a catalog of the SDSS matches
    between the two fields. This is required to form the SDSS color that will be
    used in solving the color equations. The function returns a ``Pandas``
    dataframe of the matched sources in each field.

    Parameters
    ----------
    img1 : str
        Name of the stacked image in the first filter (e.g. odi_g)
    img2 : str
        Name of the stacked image in the second filter (e.g. odi_r)
    inst : str
        The version of ODI used to collect the data (podi or 5odi)
    gmaglim : float
        The g magnitude limit to set on the SDSS sources retrieved
        in each field.

    Returns
    -------
    img1_match_df: pandas dataframe
        Pandas dataframe of matched sources in img 1
    img2_match_df: pandas dataframe
        Pandas dataframe of matched sources in img 2

    Examples
    --------
    >>> img1 = 'GCPair-F1_odi_g.fits'
    >>> img2 = 'GCPair-F1_odi_r.fits'
    >>> inst = 'podi'
    >>> img1_match_df, img2_match_df = full_sdssmatch(img1,img2,inst)
    """
    # Fetch the full SDSS catalog for each image; the catalogs are written to
    # disk as ``<image>.sdssxy`` and the matched subsets will be written to
    # ``<image>.match.sdssxy``.
    odi.sdss_coords_full(img1,inst,gmaglim=gmaglim)
    img1_sdss_cat = img1[:-5]+'.sdssxy'
    img1_match = img1[:-5]+'.match.sdssxy'
    odi.sdss_coords_full(img2,inst,gmaglim=gmaglim)
    img2_sdss_cat = img2[:-5]+'.sdssxy'
    img2_match = img2[:-5]+'.match.sdssxy'
    # Columns: x, y, ra, dec, then SDSS psf magnitudes and errors in ugriz.
    x_1, y_1, ras_1,decs_1,psfMag_u_1,psfMagErr_u_1,psfMag_g_1,psfMagErr_g_1,psfMag_r_1,psfMagErr_r_1,psfMag_i_1,psfMagErr_i_1,psfMag_z_1,psfMagErr_z_1 = np.loadtxt(img1_sdss_cat,
                                                                                                                                                                    usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13),
                                                                                                                                                                    unpack=True)
    x_2, y_2, ras_2,decs_2,psfMag_u_2,psfMagErr_u_2,psfMag_g_2,psfMagErr_g_2,psfMag_r_2,psfMagErr_r_2,psfMag_i_2,psfMagErr_i_2,psfMag_z_2,psfMagErr_z_2 = np.loadtxt(img2_sdss_cat,
                                                                                                                                                                    usecols=(0,1,2,3,4,5,6,7,8,9,10,11,12,13),
                                                                                                                                                                    unpack=True)
    # Cross-match the two catalogs on sky position within 1e-6 deg
    # (~3.6 milliarcsec), i.e. effectively identical SDSS entries.
    img1_catalog = SkyCoord(ra = ras_1*u.degree, dec= decs_1*u.degree)
    img2_catalog = SkyCoord(ra = ras_2*u.degree, dec= decs_2*u.degree)
    # d2d and d3d (separations) are returned but not used here.
    id_img1, id_img2, d2d, d3d = img2_catalog.search_around_sky(img1_catalog,0.000001*u.deg)
    # Keep only the matched rows of catalog 1 (index arrays from the match).
    x_1 = x_1[id_img1]
    y_1 = y_1[id_img1]
    ras_1 = ras_1[id_img1]
    decs_1 = decs_1[id_img1]
    psfMag_u_1 = psfMag_u_1[id_img1]
    psfMagErr_u_1 = psfMagErr_u_1[id_img1]
    psfMag_g_1 = psfMag_g_1[id_img1]
    psfMagErr_g_1 = psfMagErr_g_1[id_img1]
    psfMag_r_1 = psfMag_r_1[id_img1]
    psfMagErr_r_1 = psfMagErr_r_1[id_img1]
    psfMag_i_1 = psfMag_i_1[id_img1]
    psfMagErr_i_1 = psfMagErr_i_1[id_img1]
    psfMag_z_1 = psfMag_z_1[id_img1]
    psfMagErr_z_1 = psfMagErr_z_1[id_img1]
    # OrderedDict preserves the column order in the output dataframe/file.
    img1_match_dict = OrderedDict([('x_1',x_1),('y_1',y_1),('ras_1',ras_1),
                                   ('decs_1',decs_1),('psfMag_u_1',psfMag_u_1),
                                   ('psfMagErr_u_1',psfMagErr_u_1),
                                   ('psfMag_g_1',psfMag_g_1),('psfMagErr_g_1',psfMagErr_g_1),
                                   ('psfMag_r_1',psfMag_r_1),('psfMagErr_r_1',psfMagErr_r_1),
                                   ('psfMag_i_1',psfMag_i_1),('psfMagErr_i_1',psfMagErr_i_1),
                                   ('psfMag_z_1',psfMag_z_1),('psfMagErr_z_1',psfMagErr_z_1)])
    img1_match_df = pd.DataFrame.from_dict(img1_match_dict)
    img1_match_df.to_csv(img1_match,index=False,sep= ' ',header=False)
    # Keep only the matched rows of catalog 2.
    x_2 = x_2[id_img2]
    y_2 = y_2[id_img2]
    ras_2 = ras_2[id_img2]
    decs_2 = decs_2[id_img2]
    psfMag_u_2 = psfMag_u_2[id_img2]
    psfMagErr_u_2 = psfMagErr_u_2[id_img2]
    psfMag_g_2 = psfMag_g_2[id_img2]
    psfMagErr_g_2 = psfMagErr_g_2[id_img2]
    psfMag_r_2 = psfMag_r_2[id_img2]
    psfMagErr_r_2 = psfMagErr_r_2[id_img2]
    psfMag_i_2 = psfMag_i_2[id_img2]
    psfMagErr_i_2 = psfMagErr_i_2[id_img2]
    psfMag_z_2 = psfMag_z_2[id_img2]
    psfMagErr_z_2 = psfMagErr_z_2[id_img2]
    img2_match_dict = OrderedDict([('x_2',x_2),('y_2',y_2),('ras_2',ras_2),
                                   ('decs_2',decs_2),('psfMag_u_2',psfMag_u_2),
                                   ('psfMagErr_u_2',psfMagErr_u_2),
                                   ('psfMag_g_2',psfMag_g_2),('psfMagErr_g_2',psfMagErr_g_2),
                                   ('psfMag_r_2',psfMag_r_2),('psfMagErr_r_2',psfMagErr_r_2),
                                   ('psfMag_i_2',psfMag_i_2),('psfMagErr_i_2',psfMagErr_i_2),
                                   ('psfMag_z_2',psfMag_z_2),('psfMagErr_z_2',psfMagErr_z_2)])
    img2_match_df = pd.DataFrame.from_dict(img2_match_dict)
    img2_match_df.to_csv(img2_match,index=False,sep= ' ',header=False)
    return img1_match_df, img2_match_df
def sdss_source_props_full(img):
    """
    Use photutils to get the elongation of all of the sdss sources.

    Can maybe be used for a point source filter.

    Parameters
    ----------
    img : odi image object
        Image whose matched SDSS catalog (``<image>.match.sdssxy``) is
        measured.

    Returns
    -------
    elong_med : float
        Median elongation of the measured sources.
    elong_std : float
        Standard deviation of the measured elongations.
    """
    # Bug fix: the original docstring was closed with five quotes, leaving a
    # stray empty-string expression statement behind it.
    hdulist = odi.fits.open(img.f)
    data = hdulist[0].data
    sdss_source_file = img.nofits()+'.match.sdssxy'
    x,y,ra,dec,g,g_err,r,r_err = np.loadtxt(sdss_source_file,usecols=(0,1,2,3,
                                            6,7,8,9),unpack=True)
    box_centers = list(zip(y,x))
    box_centers = np.reshape(box_centers,(len(box_centers),2))
    for i,center in enumerate(box_centers):
        # Bug fix: slice bounds must be ints; loadtxt yields floats, and
        # float slice indices raise TypeError on modern NumPy.
        x1 = int(center[0])-50
        x2 = int(center[0])+50
        y1 = int(center[1])-50
        y2 = int(center[1])+50
        # 100x100 pixel cutout around the source.
        box = data[x1:x2,y1:y2]
        mean, median, std = odi.sigma_clipped_stats(box, sigma=3.0)
        threshold = median + (std * 2.)
        segm_img = odi.detect_sources(box, threshold, npixels=20)
        source_props = odi.source_properties(box,segm_img)
        columns = ['xcentroid', 'ycentroid','elongation','semimajor_axis_sigma','semiminor_axis_sigma']
        if i == 0:
            source_tbl = source_props.to_table(columns=columns)
        else:
            # Append only the first detected source in each cutout.
            source_tbl.add_row((source_props[0].xcentroid,source_props[0].ycentroid,
                                source_props[0].elongation,source_props[0].semimajor_axis_sigma,
                                source_props[0].semiminor_axis_sigma))
    elong_med,elong_std = np.median(source_tbl['elongation']),np.std(source_tbl['elongation'])
    hdulist.close()
    return elong_med,elong_std
def read_proc(file,filter):
    """
    Collect summary statistics from the ``derived_props.txt`` file written by
    ``odi_process.py``, restricted to one filter.

    Parameters
    ----------
    file : str
        Path to the properties file (usually ``derived_props.txt``).
    filter : str
        ODI filter string used to select rows.

    Returns
    -------
    tuple of float
        ``(median_fwhm, median_bg_mean, median_bg_median, median_bg_std)``
        computed over the OTAs that match ``filter``.

    Note
    -----
    The fwhm values need to be remeasured in the final stack; a separate
    function handles that.
    """
    # Column 2 holds the filter name; columns 3, 6, 7, 8 hold the numeric
    # per-OTA statistics.
    filt_col = np.loadtxt(file, usecols=(2,), unpack=True, dtype=str)
    fwhm_col, bgmean_col, bgmed_col, bgstd_col = np.loadtxt(
        file, usecols=(3, 6, 7, 8), unpack=True)
    selected = np.where(filt_col == filter)
    return (np.median(fwhm_col[selected]),
            np.median(bgmean_col[selected]),
            np.median(bgmed_col[selected]),
            np.median(bgstd_col[selected]))
def get_airmass(image_list):
    """
    Return the median airmass over all dithers in a given filter.

    Each image's airmass is read from its primary FITS header.
    """
    values = []
    for image in image_list:
        hdu = odi.fits.open(image.f)
        values.append(hdu[0].header['airmass'])
        hdu.close()
    return np.median(values)
import time
import shutil
import os
import sys
import subprocess
import math
import pickle
import glob
import json
from copy import deepcopy
import warnings
import random
from multiprocessing import Pool
# import emukit.multi_fidelity as emf
# from emukit.model_wrappers.gpy_model_wrappers import GPyMultiOutputWrapper
# from emukit.multi_fidelity.convert_lists_to_array import convert_x_list_to_array, convert_xy_lists_to_arrays
# Import the numerical dependencies one group at a time. `moduleName` tracks
# which dependency is currently being imported so that, if an import fails,
# the failing package can be reported by name; `error_tag` records whether
# all imports succeeded.
try:
    moduleName = "emukit"
    import emukit.multi_fidelity as emf
    from emukit.model_wrappers.gpy_model_wrappers import GPyMultiOutputWrapper
    from emukit.multi_fidelity.convert_lists_to_array import convert_x_list_to_array, convert_xy_lists_to_arrays
    moduleName = "pyDOE"
    from pyDOE import lhs
    moduleName = "GPy"
    import GPy as GPy
    moduleName = "scipy"
    from scipy.stats import lognorm, norm
    moduleName = "numpy"
    import numpy as np
    # All imports succeeded.
    error_tag=False
except:  # NOTE(review): bare except — presumably intentional to catch any import-time failure; confirm
    error_tag=True
class GpFromModel(object):
def __init__(self, work_dir, run_type, os_type, inp, errlog):
t_init = time.time()
self.errlog = errlog
self.work_dir = work_dir
self.os_type = os_type
self.run_type = run_type
#
# From external READ JSON FILE
#
rv_name = list()
self.g_name = list()
x_dim = 0
y_dim = 0
for rv in inp['randomVariables']:
rv_name = rv_name + [rv['name']]
x_dim += 1
if x_dim == 0:
msg = 'Error reading json: RV is empty'
errlog.exit(msg)
for g in inp['EDP']:
if g['length']==1: # scalar
self.g_name = self.g_name + [g['name']]
y_dim += 1
else: # vector
for nl in range(g['length']):
self.g_name = self.g_name + ["{}_{}".format(g['name'],nl+1)]
y_dim += 1
if y_dim == 0:
msg = 'Error reading json: EDP(QoI) is empty'
errlog.exit(msg)
# Accuracy is also sensitive to the range of X
self.id_sim = 0
self.x_dim = x_dim
self.y_dim = y_dim
self.rv_name = rv_name
self.do_predictive = False
automate_doe = False
surrogateInfo = inp["UQ_Method"]["surrogateMethodInfo"]
try:
self.do_parallel = surrogateInfo["parallelExecution"]
except:
self.do_parallel = True
if self.do_parallel:
if self.run_type.lower() == 'runninglocal':
self.n_processor = os.cpu_count()
from multiprocessing import Pool
self.pool = Pool(self.n_processor)
else:
# Always
from mpi4py import MPI
from mpi4py.futures import MPIPoolExecutor
self.world = MPI.COMM_WORLD
self.pool = MPIPoolExecutor()
self.n_processor = self.world.Get_size()
#self.n_processor =20
print("nprocessor :")
print(self.n_processor)
#self.cal_interval = 5
self.cal_interval = self.n_processor
else:
self.pool = 0
self.cal_interval = 5
if surrogateInfo["method"] == "Sampling and Simulation":
self.do_mf = False
do_sampling = True
do_simulation = True
self.use_existing = surrogateInfo["existingDoE"]
if self.use_existing:
self.inpData = os.path.join(work_dir, "templatedir/inpFile.in")
self.outData = os.path.join(work_dir, "templatedir/outFile.in")
thr_count = surrogateInfo['samples'] # number of samples
if surrogateInfo["advancedOpt"]:
self.doe_method = surrogateInfo["DoEmethod"]
if surrogateInfo["DoEmethod"] == "None":
do_doe = False
user_init = thr_count
else:
do_doe = True
user_init = surrogateInfo["initialDoE"]
else:
self.doe_method = "pareto" #default
do_doe = True
user_init = -100
elif surrogateInfo["method"] == "Import Data File":
self.do_mf = False
do_sampling = False
do_simulation = not surrogateInfo["outputData"]
self.doe_method = "None" # default
do_doe = False
# self.inpData = surrogateInfo['inpFile']
self.inpData = os.path.join(work_dir, "templatedir/inpFile.in")
if not do_simulation:
# self.outData = surrogateInfo['outFile']
self.outData = os.path.join(work_dir, "templatedir/outFile.in")
elif surrogateInfo["method"] == "Import Multi-fidelity Data File":
self.do_mf = True
self.doe_method = "None" # default
self.hf_is_model = surrogateInfo['HFfromModel']
self.lf_is_model = surrogateInfo['LFfromModel']
if self. hf_is_model:
self.use_existing_hf = surrogateInfo["existingDoE_HF"]
self.samples_hf = surrogateInfo["samples_HF"]
if self.use_existing_hf:
self.inpData = os.path.join(work_dir, "templatedir/inpFile_HF.in")
self.outData = os.path.join(work_dir, "templatedir/outFile_HF.in")
else:
self.inpData_hf = os.path.join(work_dir, "templatedir/inpFile_HF.in")
self.outData_hf = os.path.join(work_dir, "templatedir/outFile_HF.in")
self.X_hf = read_txt(self.inpData_hf, errlog)
self.Y_hf = read_txt(self.outData_hf, errlog)
if self.X_hf.shape[0] != self.Y_hf.shape[0]:
msg = 'Error reading json: high fidelity input and output files should have the same number of rows'
errlog.exit(msg)
if self.lf_is_model:
self.use_existing_lf = surrogateInfo["existingDoE_LF"]
self.samples_lf = surrogateInfo["samples_LF"]
if self.use_existing_lf:
self.inpData = os.path.join(work_dir, "templatedir/inpFile_LF.in")
self.outData = os.path.join(work_dir, "templatedir/outFile_LF.in")
else:
self.inpData_lf = os.path.join(work_dir, "templatedir/inpFile_LF.in")
self.outData_lf = os.path.join(work_dir, "templatedir/outFile_LF.in")
self.X_lf = read_txt(self.inpData_lf, errlog)
self.Y_lf = read_txt(self.outData_lf, errlog)
if self.X_lf.shape[0] != self.Y_lf.shape[0]:
msg = 'Error reading json: low fidelity input and output files should have the same number of rows'
errlog.exit(msg)
if (not self.hf_is_model) and self.lf_is_model:
self.mf_case = "data-model"
do_sampling = True
do_simulation = True
do_doe = surrogateInfo["doDoE"]
self.use_existing = self.use_existing_lf
if self.lf_is_model:
if self.use_existing_lf:
self.inpData = self.inpData_lf
self.oupData = self.outData_lf
else:
self.inpData = self.inpData_lf
self.outData = self.outData_lf
if do_doe:
user_init = -100
else:
user_init = self.samples_lf
thr_count = self.samples_lf # number of samples
elif self.hf_is_model and (not self.lf_is_model):
self.mf_case = "model-data"
do_sampling = True
do_simulation = True
do_doe = surrogateInfo["doDoE"]
self.use_existing = self.use_existing_hf
if self.hf_is_model:
if self.use_existing_hf:
self.inpData = self.inpData_hf
self.oupData = self.outData_hf
else:
self.inpData = self.inpData_hf
self.outData = self.outData_hf
if do_doe:
user_init = -100
else:
user_init = self.samples_hf
thr_count = self.samples_hf # number of samples
elif self.hf_is_model and self.lf_is_model:
self.mf_case = "model-model"
do_sampling = True
do_simulation = True
do_doe = surrogateInfo["doDoE"]
elif (not self.hf_is_model) and (not self.lf_is_model):
self.mf_case = "data-data"
do_sampling = False
do_simulation = False
do_doe = False
self.inpData = self.inpData_lf
self.outData = self.outData_lf
else:
msg = 'Error reading json: either select "Import Data File" or "Sampling and Simulation"'
errlog.exit(msg)
if surrogateInfo["advancedOpt"]:
self.do_logtransform = surrogateInfo["logTransform"]
kernel = surrogateInfo["kernel"]
do_linear = surrogateInfo["linear"]
nugget_opt = surrogateInfo["nuggetOpt"]
try:
self.nuggetVal = np.array(json.loads("[{}]".format(surrogateInfo["nuggetString"])))
except json.decoder.JSONDecodeError:
msg = 'Error reading json: improper format of nugget values/bounds. Provide nugget values/bounds of each QoI with comma delimiter'
errlog.exit(msg)
if self.nuggetVal.shape[0]!=self.y_dim and self.nuggetVal.shape[0]!=0 :
msg = 'Error reading json: Number of nugget quantities ({}) does not match # QoIs ({})'.format(self.nuggetVal.shape[0],self.y_dim)
errlog.exit(msg)
if nugget_opt == "Fixed Values":
for Vals in self.nuggetVal:
if (not np.isscalar(Vals)):
msg = 'Error reading json: provide nugget values of each QoI with comma delimiter'
errlog.exit(msg)
elif nugget_opt == "Fixed Bounds":
for Bous in self.nuggetVal:
if (np.isscalar(Bous)):
msg = 'Error reading json: provide nugget bounds of each QoI in brackets with comma delimiter, e.g. [0.0,1.0],[0.0,2.0],...'
errlog.exit(msg)
elif (isinstance(Bous,list)):
msg = 'Error reading json: provide both lower and upper bounds of nugget'
errlog.exit(msg)
elif Bous.shape[0]!=2:
msg = 'Error reading json: provide nugget bounds of each QoI in brackets with comma delimiter, e.g. [0.0,1.0],[0.0,2.0],...'
errlog.exit(msg)
elif Bous[0]>Bous[1]:
msg = 'Error reading json: the lower bound of a nugget value should be smaller than its upper bound'
errlog.exit(msg)
# if self.do_logtransform:
# mu = 0
# sig2 = self.nuggetVal
# #median = np.exp(mu)
# #mean = np.exp(mu + sig2/2)
# self.nuggetVal = np.exp(2*mu + sig2)*(np.exp(sig2)-1)
else:
self.do_logtransform = False
kernel = 'Matern 5/2'
do_linear = False
#do_nugget = True
nugget_opt = "optimize"
if not self.do_mf:
if do_simulation:
femInfo = inp["fem"]
self.inpFile = femInfo["inputFile"]
self.postFile = femInfo["postprocessScript"]
self.appName = femInfo["program"]
#
# get x points
#
if do_sampling:
thr_NRMSE = surrogateInfo["accuracyLimit"]
thr_t = surrogateInfo["timeLimit"] * 60
np.random.seed(surrogateInfo['seed'])
random.seed(surrogateInfo['seed'])
self.xrange = np.empty((0, 2), float)
for rv in inp['randomVariables']:
if "lowerbound" not in rv:
msg = 'Error in input RV: all RV should be set to Uniform distribution'
errlog.exit(msg)
self.xrange = np.vstack((self.xrange, [rv['lowerbound'], rv['upperbound']]))
self.len = np.abs(np.diff(self.xrange).T[0])
if sum(self.len == 0) > 0:
msg = 'Error in input RV: training range of RV should be greater than 0'
errlog.exit(msg)
#
# Read existing samples
#
if self.use_existing:
X_tmp = read_txt(self.inpData,errlog)
Y_tmp = read_txt(self.outData,errlog)
n_ex = X_tmp.shape[0]
if self.do_mf:
if X_tmp.shape[1] != self.X_hf.shape[1]:
msg = 'Error importing input data: dimension inconsistent: high fidelity data have {} RV column(s) but low fidelity model have {}.'.format(
self.X_hf.shape[1], X_tmp.shape[1])
errlog.exit(msg)
if Y_tmp.shape[1] != self.Y_hf.shape[1]:
msg = 'Error importing input data: dimension inconsistent: high fidelity data have {} QoI column(s) but low fidelity model have {}.'.format(
self.Y_hf.shape[1], Y_tmp.shape[1])
errlog.exit(msg)
if X_tmp.shape[1] != x_dim:
msg = 'Error importing input data: dimension inconsistent: have {} RV(s) but have {} column(s).'.format(
x_dim, X_tmp.shape[1])
errlog.exit(msg)
if Y_tmp.shape[1] != y_dim:
msg = 'Error importing input data: dimension inconsistent: have {} QoI(s) but have {} column(s).'.format(
y_dim, Y_tmp.shape[1])
errlog.exit(msg)
if n_ex != Y_tmp.shape[0]:
msg = 'Error importing input data: numbers of samples of inputs ({}) and outputs ({}) are inconsistent'.format(n_ex, Y_tmp.shape[0])
errlog.exit(msg)
else:
n_ex = 0
if user_init ==0:
#msg = 'Error reading json: # of initial DoE should be greater than 0'
#errlog.exit(msg)
user_init = -1;
X_tmp = np.zeros((0, x_dim))
Y_tmp = np.zeros((0, y_dim))
if user_init < 0:
n_init_ref = min(4 * x_dim, thr_count + n_ex - 1, 500)
if self.do_parallel:
n_init_ref = int(np.ceil(n_init_ref/self.n_processor)*self.n_processor) # Let's not waste resource
if n_init_ref > n_ex:
n_init = n_init_ref - n_ex
else:
n_init = 0
else:
n_init = user_init
n_iter = thr_count - n_init
def FEM_batch(Xs, id_sim):
return run_FEM_batch(Xs, id_sim, self.rv_name, self.do_parallel, self.y_dim, self.os_type, self.run_type, self.pool, t_init, thr_t)
# check validity of datafile
if n_ex > 0:
#Y_test, self.id_sim = FEM_batch(X_tmp[0, :][np.newaxis], self.id_sim)
# TODO : Fix this
print(X_tmp[0, :][np.newaxis].shape)
X_test, Y_test ,self.id_sim= FEM_batch(X_tmp[0, :][np.newaxis] ,self.id_sim)
if np.sum(abs((Y_test - Y_tmp[0, :][np.newaxis]) / Y_test) > 0.01, axis=1) > 0:
msg = 'Consistency check failed. Your data is not consistent to your model response.'
errlog.exit(msg)
if n_init>0:
n_init -= 1
else:
n_iter -= 1
#
# generate initial samples
#
if n_init>0:
U = lhs(x_dim, samples=(n_init))
X = np.vstack([X_tmp, np.zeros((n_init, x_dim))])
for nx in range(x_dim):
X[n_ex:n_ex+n_init, nx] = U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0]) + self.xrange[nx, 0]
else:
X = X_tmp
if sum(abs(self.len / self.xrange[:, 0]) < 1.e-7) > 1:
msg = 'Error : upperbound and lowerbound should not be the same'
errlog.exit(msg)
n_iter = thr_count - n_init
else:
n_ex = 0
thr_NRMSE = 0.02 # default
thr_t = float('inf')
#
# Read sample locations from directory
#
X = read_txt(self.inpData,errlog)
if self.do_mf:
if X.shape[1] != self.X_hf.shape[1]:
msg = 'Error importing input data: dimension inconsistent: high fidelity data have {} RV column(s) but low fidelity model have {}.'.format(
self.X_hf.shape[1], X.shape[1])
errlog.exit(msg)
if X.shape[1] != x_dim:
msg = 'Error importing input data: Number of dimension inconsistent: have {} RV(s) but {} column(s).' \
.format(x_dim, X.shape[1])
errlog.exit(msg)
self.xrange = np.vstack([np.min(X, axis=0), np.max(X, axis=0)]).T
self.len = 2 * np.std(X, axis=0)
thr_count = X.shape[0]
n_init = thr_count
n_iter = 0
# give error
if thr_count <= 2:
msg = 'Number of samples should be greater than 2.'
errlog.exit(msg)
if do_doe:
ac = 1 # pre-screening time = time*ac
ar = 1 # cluster
n_candi = min(200 * x_dim, 2000) # candidate points
n_integ = min(200 * x_dim, 2000) # integration points
if user_init > thr_count:
msg = 'Number of DoE cannot exceed total number of simulation'
errlog.exit(msg)
else:
ac = 1 # pre-screening time = time*ac
ar = 1 # cluster
n_candi = 1 # candidate points
n_integ = 1 # integration points
user_init = thr_count
#
# get y points
#
if do_simulation:
#
# SimCenter workflow setting
#
if os.path.exists('{}/workdir.1'.format(work_dir)):
is_left = True
idx = 0
def change_permissions_recursive(path, mode):
for root, dirs, files in os.walk(path, topdown=False):
for dir in [os.path.join(root, d) for d in dirs]:
os.chmod(dir, mode)
for file in [os.path.join(root, f) for f in files]:
os.chmod(file, mode)
while is_left:
idx = idx + 1
try:
if os.path.exists('{}/workdir.{}/workflow_driver.bat'.format(work_dir, idx)):
#os.chmod('{}/workdir.{}'.format(work_dir, idx), 777)
change_permissions_recursive('{}/workdir.{}'.format(work_dir, idx), 0o777)
my_dir = '{}/workdir.{}'.format(work_dir, idx)
os.chmod(my_dir, 0o777)
shutil.rmtree(my_dir)
#shutil.rmtree('{}/workdir.{}'.format(work_dir, idx), ignore_errors=False, onerror=handleRemoveReadonly)
except Exception as ex:
print(ex)
is_left = True
break
print("Cleaned the working directory")
else:
print("Work directory is clean")
if os.path.exists('{}/dakotaTab.out'.format(work_dir)):
os.remove('{}/dakotaTab.out'.format(work_dir))
if os.path.exists('{}/inputTab.out'.format(work_dir)):
os.remove('{}/inputTab.out'.format(work_dir))
if os.path.exists('{}/outputTab.out'.format(work_dir)):
os.remove('{}/outputTab.out'.format(work_dir))
if os.path.exists('{}/SimGpModel.pkl'.format(work_dir)):
os.remove('{}/SimGpModel.pkl'.format(work_dir))
if os.path.exists('{}/verif.out'.format(work_dir)):
os.remove('{}/verif.out'.format(work_dir))
# func = self.__run_FEM(X,self.id_sim, self.rv_name)
#
# Generate initial samples
#
t_tmp = time.time()
X_fem, Y_fem ,self.id_sim= FEM_batch(X[n_ex:, :],self.id_sim)
Y = np.vstack((Y_tmp,Y_fem))
X = np.vstack((X[0:n_ex, :],X_fem))
t_sim_all = time.time() - t_tmp
if automate_doe:
self.t_sim_each = t_sim_all / n_init
else:
self.t_sim_each = float("inf")
#
# Generate predictive samples
#
if self.do_predictive:
n_pred = 100
Xt = np.zeros((n_pred, x_dim))
U = lhs(x_dim, samples=n_pred)
for nx in range(x_dim):
Xt[:, nx] = U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0]) + self.xrange[nx, 0]
#
# Yt = np.zeros((n_pred, y_dim))
# for ns in range(n_pred):
# Yt[ns, :],self.id_sim = run_FEM(Xt[ns, :][np.newaxis],self.id_sim, self.rv_name)
Yt = np.zeros((n_pred, y_dim))
Xt, Yt ,self.id_sim= FEM_batch(Xt,self.id_sim)
else:
#
# READ SAMPLES FROM DIRECTORY
#
Y = read_txt(self.outData,errlog)
if self.do_mf:
if Y.shape[1] != self.Y_hf.shape[1]:
msg = 'Error importing input data: dimension inconsistent: high fidelity data have {} QoI column(s) but low fidelity model have {}.'.format(
self.Y_hf.shape[1], Y.shape[1])
errlog.exit(msg)
if Y.shape[1] != y_dim:
msg = 'Error importing input data: Number of dimension inconsistent: have {} QoI(s) but {} column(s).' \
.format(y_dim, Y.shape[1])
errlog.exit(msg)
if X.shape[0] != Y.shape[0]:
msg = 'Error importing input data: numbers of samples of inputs ({}) and outputs ({}) are inconsistent'.format(X.shape[0], Y.shape[0])
errlog.exit(msg)
thr_count = 0
self.t_sim_each = float("inf")
#
# GP function
#
if kernel == 'Radial Basis':
kr = GPy.kern.RBF(input_dim=x_dim, ARD=True)
elif kernel == 'Exponential':
kr = GPy.kern.Exponential(input_dim=x_dim, ARD=True)
elif kernel == 'Matern 3/2':
kr = GPy.kern.Matern32(input_dim=x_dim, ARD=True)
elif kernel == 'Matern 5/2':
kr = GPy.kern.Matern52(input_dim=x_dim, ARD=True)
if do_linear:
kr = kr + GPy.kern.Linear(input_dim=x_dim, ARD=True)
if not self.do_mf:
kg = kr
self.m_list = list()
for i in range(y_dim):
self.m_list = self.m_list + [GPy.models.GPRegression(X, Y[:, i][np.newaxis].transpose(), kernel=kg.copy(),normalizer=True)]
for parname in self.m_list[i].parameter_names():
if parname.endswith('lengthscale'):
exec('self.m_list[i].' + parname + '=self.len')
else:
kgs = emf.kernels.LinearMultiFidelityKernel([kr.copy(), kr.copy()])
if not self.hf_is_model:
if not X.shape[1]==self.X_hf.shape[1]:
msg = 'Error importing input data: dimension of low ({}) and high ({}) fidelity models (datasets) are inconsistent'.format(X.shape[1], self.X_hf.shape[1])
errlog.exit(msg)
if not self.lf_is_model:
if not X.shape[1]==self.X_lf.shape[1]:
msg = 'Error importing input data: dimension of low ({}) and high ({}) fidelity models (datasets) are inconsistent'.format(X.shape[1], self.X_hf.shape[1])
errlog.exit(msg)
if self.mf_case == 'data-model' or self.mf_case=='data-data':
X_list, Y_list = emf.convert_lists_to_array.convert_xy_lists_to_arrays([X, self.X_hf], [Y, self.Y_hf])
elif self.mf_case == 'model-data':
X_list, Y_list = emf.convert_lists_to_array.convert_xy_lists_to_arrays([self.X_lf, X], [self.Y_lf, Y])
self.m_list = list()
for i in range(y_dim):
self.m_list = self.m_list + [GPyMultiOutputWrapper(emf.models.GPyLinearMultiFidelityModel(X_list, Y_list, kernel=kgs.copy(), n_fidelities=2), 2, n_optimization_restarts=15)]
#
# Verification measures
#
self.NRMSE_hist = np.zeros((1, y_dim), float)
self.NRMSE_idx = np.zeros((1, 1), int)
#leng_hist = np.zeros((1, self.m_list[0]._param_array_.shape[0]), int)
if self.do_predictive:
self.NRMSE_pred_hist = np.empty((1, y_dim), float)
#
# Run DoE
#
break_doe = False
print("======== RUNNING GP DoE ===========")
exit_code = 'count' # num iter
i = 0
x_new = np.zeros((0,x_dim))
n_new = 0
doe_off = False # false if true
while not doe_off:
t = time.time()
if self.doe_method == "random":
do_cal = True
elif self.doe_method == "pareto":
do_cal = True
elif np.mod(i, self.cal_interval) == 0:
do_cal = True
else:
do_cal = False
t_tmp = time.time()
[x_new, self.m_list, err, idx, Y_cv, Y_cv_var] = self.__design_of_experiments(X, Y, ac, ar, n_candi,
n_integ, self.m_list,
do_cal, nugget_opt, do_doe)
t_doe = time.time() - t_tmp
print('DoE Time: {:.2f} s'.format(t_doe))
if automate_doe:
if t_doe > self.t_sim_each:
break_doe = True
print('========>> DOE OFF')
n_left = n_iter - i
break
if not self.do_mf:
NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y)
else:
if self.mf_case == 'data-model' or self.mf_case == 'data-data':
NRMSE_val = self.__normalized_mean_sq_error(Y_cv, self.Y_hf)
elif self.mf_case == 'model-data' :
NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y)
self.NRMSE_hist = np.vstack((self.NRMSE_hist, np.array(NRMSE_val)))
self.NRMSE_idx = np.vstack((self.NRMSE_idx, i))
if self.do_predictive:
Yt_pred = np.zeros((n_pred, y_dim))
for ny in range(y_dim):
y_pred_tmp, dummy = self.__predict(self.m_list[ny],Xt)
Yt_pred[:, ny] = y_pred_tmp.transpose()
if self.do_logtransform:
Yt_pred = np.exp(Yt_pred)
NRMSE_pred_val = self.__normalized_mean_sq_error(Yt_pred, Yt)
self.NRMSE_pred_hist = np.vstack((self.NRMSE_pred_hist, np.array(NRMSE_pred_val)))
if self.id_sim >= thr_count:
n_iter = i
exit_code = 'count'
doe_off = True
if not do_cal:
break_doe = False
n_left = 0
break
if np.max(NRMSE_val) < thr_NRMSE:
n_iter = i
exit_code = 'accuracy'
doe_off = True
if not do_cal:
break_doe = False
n_left = n_iter - i
break
if time.time() - t_init > thr_t - self.calib_time:
n_iter = i
exit_code = 'time'
doe_off = True
if not do_cal:
break_doe = False
n_left = n_iter - i
break
n_new = x_new.shape[0]
if not (n_new + self.id_sim < n_init + n_iter +1):
n_new = n_init + n_iter - self.id_sim
x_new = x_new[0:n_new, :]
i = self.id_sim + n_new
# y_new = np.zeros((n_new, y_dim))
# for ny in range(n_new):
# y_new[ny, :],self.id_sim = run_FEM(x_new[ny, :][np.newaxis],self.id_sim, self.rv_name)
x_new, y_new, self.id_sim = FEM_batch(x_new,self.id_sim)
#print(">> {:.2f} s".format(time.time() - t_init))
X = np.vstack([X, x_new])
Y = np.vstack([Y, y_new])
print("======== RUNNING GP Calibration ===========")
# not used
if break_doe:
X_tmp = np.zeros((n_left, x_dim))
Y_tmp = np.zeros((n_left, y_dim))
U = lhs(x_dim, samples=n_left)
for nx in range(x_dim):
# X[:,nx] = np.random.uniform(xrange[nx,0], xrange[nx,1], (1, n_init))
X_tmp[:, nx] = U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0]) + self.xrange[nx, 0]
X_tmp, Y_tmp, self.id_sim = FEM_batch(X_tmp,self.id_sim)
# for ns in np.arange(n_left):
# Y_tmp[ns, :],self.id_sim = run_FEM(X_tmp[ns, :][np.newaxis],self.id_sim, self.rv_name)
# print(">> {:.2f} s".format(time.time() - t_init))
# if time.time() - t_init > thr_t - self.calib_time:
# X_tmp = X_tmp[:ns, :]
# Y_tmp = Y_tmp[:ns, :]
# break
X = np.vstack((X, X_tmp))
Y = np.vstack((Y, Y_tmp))
do_doe = False
# if not do_doe:
# exit_code = 'count'
#
# do_cal = True
# self.t_sim_each = float("inf") # so that calibration is not terminated in the middle
# self.m_list, Y_cv, Y_cv_var = self.__design_of_experiments(X, Y, 1, 1, 1, 1, self.m_list, do_cal,
# do_nugget, do_doe)
# if not self.do_mf:
# NRMSE_val = self.__normalized_mean_sq_error(Y_cv, Y)
# else:
# NRMSE_val = self.__normalized_mean_sq_error(Y_cv, self.Y_hf)
sim_time = time.time() - t_init
n_samp = Y.shape[0]
# import matplotlib.pyplot as plt
# if self.x_dim==1:
# if self.do_mf:
# for ny in range(y_dim):
#
# x_plot = np.linspace(0, 1, 200)[:, np.newaxis]
# X_plot = convert_x_list_to_array([x_plot, x_plot])
# X_plot_l = X_plot[:len(x_plot)]
# X_plot_h = X_plot[len(x_plot):]
#
# lf_mean_lin_mf_model, lf_var_lin_mf_model = self.__predict(self.m_list[ny],X_plot_l)
# lf_std_lin_mf_model = np.sqrt(lf_var_lin_mf_model)
# hf_mean_lin_mf_model, hf_var_lin_mf_model = self.__predict(self.m_list[ny],X_plot_h)
# hf_std_lin_mf_model = np.sqrt(hf_var_lin_mf_model)
#
#
# plt.plot(x_plot, lf_mean_lin_mf_model);
# plt.plot(x_plot, hf_mean_lin_mf_model, '-');
# plt.plot(X, Y[:,ny], 'x');
# plt.plot(self.X_hf,self.Y_hf[:,ny], 'x');
# plt.show()
# else:
# for ny in range(y_dim):
# x_plot = np.linspace(0, 1, 200)[:, np.newaxis]
#
# hf_mean_lin_mf_model, hf_var_lin_mf_model = self.__predict(self.m_list[ny], x_plot)
#
# plt.plot(x_plot, hf_mean_lin_mf_model, '-');
# plt.plot(X, Y[:, ny], 'x');
# plt.show()
#
#
# plt.plot(Y_cv[:,0], self.Y_hf[:,0], 'x'); plt.show()
# plt.show()
# plt.plot(Y_cv[:,1], Y[:,1], 'x')
# plt.show()
print('my exit code = {}'.format(exit_code))
print('1. count = {}'.format(self.id_sim))
print('2. max(NRMSE) = {}'.format(np.max(NRMSE_val)))
print('3. time = {:.2f} s'.format(sim_time))
# for user information
if do_simulation:
n_err = 1000
Xerr = np.zeros((n_err, x_dim))
U = lhs(x_dim, samples=n_err)
for nx in range(x_dim):
Xerr[:, nx] = U[:, nx] * (self.xrange[nx, 1] - self.xrange[nx, 0]) + self.xrange[nx, 0]
y_pred_var = np.zeros((n_err, y_dim))
y_data_var = np.zeros((n_err, y_dim))
for ny in range(y_dim):
# m_tmp = self.m_list[ny].copy()
m_tmp = self.m_list[ny]
if self.do_logtransform:
#y_var_val = np.var(np.log(Y[:, ny]))
log_mean = np.mean(np.log(Y[:, ny]))
log_var = np.var(np.log(Y[:, ny]))
y_var_val = np.exp(2*log_mean+log_var)*(np.exp(log_var)-1) # in linear space
else:
y_var_val = np.var(Y[:, ny])
for ns in range(n_err):
y_pred_tmp, y_pred_var_tmp = self.__predict(m_tmp,Xerr[ns, :][np.newaxis])
if self.do_logtransform:
y_pred_var[ns, ny] = np.exp(2 * y_pred_tmp + y_pred_var_tmp) * (np.exp(y_pred_var_tmp) - 1)
else:
y_pred_var[ns, ny] = y_pred_var_tmp
y_data_var[ns, ny] = y_var_val
#for parname in m_tmp.parameter_names():
# if ('Mat52' in parname) and parname.endswith('variance'):
# exec('y_pred_prior_var[ns,ny]=m_tmp.' + parname)
#error_ratio1_Pr = (y_pred_var / y_pred_prior_var)
error_ratio2_Pr = (y_pred_var / y_data_var)
#np.max(error_ratio1_Pr, axis=0)
np.max(error_ratio2_Pr, axis=0)
self.perc_thr = np.hstack([np.array([1]), np.arange(10, 1000, 50), np.array([999])])
error_sorted = np.sort(np.max(error_ratio2_Pr, axis=1), axis=0)
self.perc_val = error_sorted[self.perc_thr] # criteria
self.perc_thr = 1 - (self.perc_thr) * 0.001 # ratio=simulation/sampling
corr_val = | np.zeros((y_dim,)) | numpy.zeros |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
# NOTE(review): AUTH is [api_username, api_token] for OOINet; '<KEY>' is a
# redacted placeholder -- a real API token must be supplied before any
# request in this module will authenticate.
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
    """Request a NetCDF data set from the OOI M2M interface and poll until ready.

    :param uframe_dataset_name: reference-designator path identifying the stream
    :param start_date: ISO-8601 start of the time window (beginDT)
    :param end_date: ISO-8601 end of the time window (endDT)
    :return: the JSON response describing the asynchronous request, or None if
        the initial request was rejected by the server
    """
    options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
    r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
    if r.status_code == requests.codes.ok:
        data = r.json()
    else:
        return None
    # wait until the request is completed, polling status.txt every 3 seconds
    # for up to 400 tries (~20 minutes)
    print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
    url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
    check_complete = url + '/status.txt'
    with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            # BUG FIX: compute elapsed *before* the success branch can print it.
            # The original assigned it only after a failed poll, so a first-try
            # success raised UnboundLocalError, and later successes printed the
            # previous iteration's value.
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
    return data
def M2M_Files(data, tag=''):
    """
    Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    Collected data is gathered into an xarray dataset for further processing.
    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: the collected data as an xarray dataset
    """
    # Pick out the THREDDS catalog URL from the request response, then hand it
    # off to list_files together with the discriminating tag.
    thredds_urls = [u for u in data['allURLs'] if re.match(r'.*thredds.*', u)]
    return list_files(thredds_urls[0], tag)
def list_files(url, tag=''):
    """
    Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
    :param url: URL to user's THREDDS catalog specific to a data request
    :param tag: regex pattern used to distinguish files of interest
    :return: list of files in the catalog with the URL path set relative to the catalog
    """
    catalog_html = requests.get(url).text
    parsed = BeautifulSoup(catalog_html, 'html.parser')
    matcher = re.compile(tag)
    # anchors whose link text matches the tag carry the relative file paths
    hrefs = []
    for anchor in parsed.find_all('a', text=matcher):
        hrefs.append(anchor.get('href'))
    return hrefs
def M2M_Data(nclist, variables):
    """Read the requested variables from each THREDDS/DODS NetCDF URL.

    :param nclist: list of THREDDS catalog file paths (as returned by M2M_Files)
    :param variables: structtype of var objects; each entry's .name selects the
        NetCDF variable to read, and its .data array is appended to in place
    :return: tuple (variables, time_converted) where time_converted is a pandas
        DatetimeIndex built from the first variable, which is assumed to hold
        time in seconds since 1900-01-01
    """
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist is going to contain more than one url eventually
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]  # drop the fixed catalog prefix, keeping the relative path
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        try:
            for ii in range(len(variables)):
                dum = openFile.variables[variables[ii].name]
                variables[ii].data = np.append(variables[ii].data, dum[:].data)
        finally:
            # BUG FIX: the original never closed the dataset handle, leaking
            # one open connection per URL; always release it.
            openFile.close()
    # convert seconds since 1900-01-01 to days, then to pandas timestamps
    tmp = variables[0].data / 60 / 60 / 24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
    return variables, time_converted
class var(object):
    """Generic container pairing a data array with a variable name and units."""

    def __init__(self):
        """A Class that generically holds data with a variable name
        and the units as attributes"""
        self.name = ''            # NetCDF variable name
        self.data = np.array([])  # accumulated sample values
        self.units = ''           # unit string reported by the data source

    def __repr__(self):
        # one line each for name, units, and the shape of the data array
        parts = (
            "name: " + self.name,
            "units: " + self.units,
            "data: size: " + str(self.data.shape),
        )
        return "\n".join(parts)
class structtype(object):
    """A class that imitates a Matlab structure type."""

    def __init__(self):
        # backing list of var entries; grows lazily through __getitem__
        self._data = []

    def __getitem__(self, index):
        """Index into the struct, auto-appending a fresh var when the
        requested index is exactly one past the current end (Matlab-style
        growth); any larger index raises IndexError as usual."""
        if index == len(self._data):
            self._data.append(var())
        return self._data[index]

    def __len__(self):
        return len(self._data)
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
# PCO2W: seawater pCO2 sensors (thermistor temperature + pCO2, telemetered DCL streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
# PHSEN: seawater pH sensors (thermistor temperature + pH, telemetered DCL streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
# SPKIR: spectral irradiance sensors (downwelling vector, telemetered DCL streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
# PRESF: seafloor pressure sensors (absolute pressure + temperature tide measurements, MFN nodes)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
# CTDBP: bottom/pumped CTDs (temp, salinity, density, pressure, conductivity; telemetered DCL streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
# VEL3D: 3-D point velocity meters on MFN nodes (turbulent E/N/up velocities + pressure)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
# VEL3DK: profiler-mounted 3-D velocity meter (velocities, attitude, CTD pressure); the profiler CTDPF branch follows it
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
    # PCO2A — partial pressure of CO2 in surface seawater and air, buoy-mounted (pco2a_a_dcl_instrument_water)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
    # PARAD — photosynthetically available radiation on the profiler (parad_k_par, umol photons m-2 s-1)
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
    # OPTAA — optical absorption/attenuation (optaa_dj_dcl_instrument); time-only placeholder streams
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
    # NUTNR — nitrate concentration from the SUNA sensor (suna_dcl_recovered), raw and salinity-corrected
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
    # MOPAK — buoy 3-axis motion/acceleration pack (mopak_o_dcl_accel_recovered); time-only placeholder streams
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
    # METBK — bulk meteorology package (metbk_a_dcl_instrument_recovered): surface T/S, winds, pressure, humidity, irradiance, precipitation, and minute flux products
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
    # FLORT — 3-wavelength fluorometer (flort_sample): chlorophyll-a, CDOM, and optical backscatter
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
    # FDCHP — direct-covariance flux package (fdchp_a_dcl_instrument_recovered); time-only placeholder stream
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
    # DOSTA — dissolved-oxygen optode (dosta_abcdjm_* recovered-host streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP: acoustic Doppler current profiler — earth-frame velocity streams (bin depths, attitude, E/N/up velocity)
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS: surface wave spectra statistics (wave heights, periods, direction/spread), buoy-mounted
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT: single-point velocity meter (E/N/up velocity, attitude, temperature, pressure)
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
# CE06ISSM buoy VELPT (single-point velocity meter), recovered-host stream.
# NOTE(review): this branch is keyed on node == 'BUOY' but the reference
# designator uses RID16, the riser/NSIF frame (compare the CE01ISSM BUOY
# branch, which uses SBD17, and the CE06ISSM NSIF branch below, which also
# uses RID16). Confirm against the OOI reference-designator tables.
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
# NOTE(review): the '_dcl_' variant of the stream is deliberately commented
# out in favor of the plain recovered stream — presumably the DCL-logged
# stream is not populated for this designator; verify before re-enabling.
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
# Data arrays start empty; they are filled in later by the data-request code.
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W: seawater pCO2 sensor (thermistor temperature, pCO2 in uatm)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN - seawater pH sensor (thermistor temperature + pH of seawater)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - spectral irradiance sensor (downwelling irradiance vector, uW cm-2 nm-1)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - seafloor pressure sensor (tide measurements: absolute pressure + temperature; MFN only)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - pumped CTD (temperature, salinity, density, pressure, conductivity)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D point velocity meter (east/north/up turbulent velocity + pressure; MFN only)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A - air-sea pCO2 sensor on the surface buoy (seawater and atmospheric pCO2, CO2 flux)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA - optical absorption/attenuation sensor (only the time variable is populated for these streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = | np.array([]) | numpy.array |
# coding: UTF-8
import numpy as np
import torch
import time
from utils import build_iterator, get_time_dif
from importlib import import_module
from tqdm import tqdm
from generate_data import cut_para_many_times
PAD, CLS = '[PAD]', '[CLS]' # padding token and BERT's [CLS] (sequence-summary) token
min_length = 64 # minimum paragraph length; only used by the commented-out filter in Predict_Cut_Paras.load_dataset
# Topic label -> risk tier (高风险=high, 中风险=medium, 低风险=low, 可公开=public).
label2class = {
    "财经" : "高风险",
    "时政" : "高风险",
    "房产" : "中风险",
    "科技" : "中风险",
    "教育" : "低风险",
    "时尚" : "低风险",
    "游戏" : "低风险",
    "家居" : "可公开",
    "体育" : "可公开",
    "娱乐" : "可公开",
}
# Topic label -> class index used by the model's output layer.
label2num = {
    "财经" : 0,
    "时政" : 1,
    "房产" : 2,
    "科技" : 3,
    "教育" : 4,
    "时尚" : 5,
    "游戏" : 6,
    "家居" : 7,
    "体育" : 8,
    "娱乐" : 9,
}
# Inverse mapping: class index -> topic label.
num2label = {
    0 : "财经",
    1 : "时政",
    2 : "房产",
    3 : "科技",
    4 : "教育",
    5 : "时尚",
    6 : "游戏",
    7 : "家居",
    8 : "体育",
    9 : "娱乐"
}
class Predict_Baseline():
    """
    Prediction method 1 (baseline).

    The input document is not preprocessed in any way: it is simply
    truncated at the tail (to ``config.pad_size`` tokens) and classified.

    Pros: fast, since truncation shrinks the data considerably.
    Cons: the model never sees the full document.

    Possible follow-ups (not implemented here):
      1. Split each document into several sequences and combine the
         per-sequence predictions into a final decision.
      2. Keyword extraction / graph construction + spectral clustering.
    """
    def __init__(self, dataset, config):
        self.dataset = dataset
        self.config = config

    def load_dataset(self, path, pad_size):
        """Read the CSV at *path* (skipping the header row) and tokenize.

        Each row is ``id,content``; content may itself contain commas, so
        only the first comma is used as the separator.
        Returns a list of (token_ids, int(id), seq_len, mask) tuples,
        each padded/truncated to *pad_size*.
        """
        contents = []
        config = self.config
        with open(path, 'r', encoding='utf-8') as fin:
            cnt = 0
            for line in tqdm(fin):
                lin = line.strip()
                if not lin:
                    continue
                cnt += 1
                if cnt == 1:
                    # First line is the CSV header.
                    continue
                pos = lin.find(',')
                id = lin[:pos]
                content = lin[pos + 1:]
                token = config.tokenizer.tokenize(content)
                token = [CLS] + token
                seq_len = len(token)
                mask = []
                token_ids = config.tokenizer.convert_tokens_to_ids(token)
                if pad_size:
                    if len(token) < pad_size:
                        mask = [1] * len(token_ids) + [0] * (pad_size - len(token))
                        token_ids += ([0] * (pad_size - len(token)))
                    else:
                        # Tail truncation: keep only the first pad_size tokens.
                        mask = [1] * pad_size
                        token_ids = token_ids[:pad_size]
                        seq_len = pad_size
                contents.append((token_ids, int(id), seq_len, mask))
        return contents

    def build_dataset(self, path):
        """Load the prediction set from *path* and build ``self.predict_iter``."""
        config = self.config
        print('\nloading predict set ...')
        predict_data = self.load_dataset(path, config.pad_size)
        print('Done!')
        self.predict_iter = build_iterator(predict_data, config)

    def evaluate(self, model):
        """Run *model* over ``self.predict_iter``; return the predicted label array."""
        config = self.config
        predict_iter = self.predict_iter
        model.eval()
        predict_all = np.array([], dtype=int)
        with torch.no_grad():
            for texts, ids in tqdm(predict_iter):
                outputs = model(texts)
                ids = ids.data.cpu().numpy()
                # argmax over the class dimension.
                predict_label = torch.max(outputs.data, 1)[1].cpu().numpy()
                predict_all = np.append(predict_all, predict_label)
        return predict_all

    def predict(self, model):
        """Restore the best checkpoint from ``config.save_path`` and predict."""
        config = self.config
        predict_iter = self.predict_iter
        model.load_state_dict(torch.load(config.save_path))
        model.eval()
        start_time = time.time()
        print('prediction ...')
        predict_labels = self.evaluate(model)
        time_dif = get_time_dif(start_time)
        print('Done !')
        print('prediction usage:',time_dif)
        return predict_labels

    def write_csv(self, labels, path):
        """Write ``id,class_label,rank_label`` rows for *labels* to *path*.

        Opened with UTF-8 explicitly: the labels are Chinese, and the
        platform default encoding (e.g. GBK/cp1252 on Windows) could
        otherwise raise or corrupt the output.
        """
        with open(path, 'w', encoding='utf-8') as fout:
            cnt = 0
            fout.write('id,class_label,rank_label'+'\n')
            for label in labels:
                fout.write(str(cnt) + ',' + num2label[label] + ',' + label2class[num2label[label]] + '\n')
                cnt += 1
class Predict_Cut_Paras():
"""
方法二 篇章切割,综合结果预测
type = 1 表示label投票
type = 2 表示得分softmax之和
type = 3 表示得分之和
others TBD -> ERROR
"""
def __init__(self, dataset, config, type=1):
self.dataset = dataset
self.config = config
self.type = type
if type == 1 or type == 2 or type == 3:
pass
else:
raise ValueError
def load_dataset(self, path, pad_size):
contents = []
config = self.config
# 篇章切割
print('cut paras ...')
start_time = time.time()
with open(path, 'r', encoding='utf-8') as fin:
cnt = 0
data = []
for line in tqdm(fin):
lin = line.strip()
if not line:
continue
cnt += 1
if cnt == 1:
continue
pos = lin.find(',')
id = lin[:pos]
content = lin[pos + 1:]
paras = cut_para_many_times(content)
for para in paras:
#if len(para) < min_length:
# continue
data.append((int(id), para))
print('Done!')
print('\nparas:',len(data))
print('Time usage:',get_time_dif(start_time))
print('\n Getting tokens ...')
for id, content in tqdm(data):
token = config.tokenizer.tokenize(content)
token = [CLS] + token
seq_len = len(token)
mask = []
token_ids = config.tokenizer.convert_tokens_to_ids(token)
if pad_size:
if len(token) < pad_size:
mask = [1] * len(token_ids) + [0] * (pad_size - len(token))
token_ids += ([0] * (pad_size - len(token)))
else:
mask = [1] * pad_size
token_ids = token_ids[:pad_size]
seq_len = pad_size
contents.append((token_ids, int(id), seq_len, mask))
# print('\nlen(contents) : ', str(len(contents))+'\n')
return contents
def build_dataset(self, path):
# 加载数据集
# [(tokens, int(id), seq_len, mask)]
config = self.config
print('\nloading predict set ...')
predict_data = self.load_dataset(path, config.pad_size)
print('Done!')
self.predict_iter = build_iterator(predict_data, config)
    def evaluate(self, model):
        """Run *model* over all paragraph batches.

        Returns (labels, ids) when ``self.type == 1`` (label voting),
        otherwise (scores, ids) for the score-aggregation modes 2 and 3.
        NOTE(review): ``np.append`` flattens its inputs, so ``score_all``
        comes back 1-D; the caller presumably reshapes it to
        (n_paragraphs, n_classes) — confirm against the aggregation code.
        """
        config = self.config
        predict_iter = self.predict_iter
        model.eval()
        predict_all = np.array([], dtype=int)
        id_all = np.array([], dtype=int)
        score_all = np.array([[]], dtype=int)
        with torch.no_grad():
            for texts, ids in tqdm(predict_iter):
                outputs = model(texts)
                # print('outputs : ', outputs)
                ids = ids.data.cpu().numpy()
                # argmax over the class dimension.
                predict_label = torch.max(outputs.data, 1)[1].cpu().numpy()
                predict_all = np.append(predict_all, predict_label)
                id_all = np.append(id_all, ids)
                score_all = np.append(score_all, outputs.data.cpu().numpy())
        if self.type == 1:
            return predict_all, id_all
        elif self.type == 2:
            return score_all, id_all
        elif self.type == 3:
            return score_all, id_all
def predict(self, model):
config = self.config
model.load_state_dict(torch.load(config.save_path))
model.eval()
start_time = time.time()
print('prediction ...')
predict_labels, ids = self.evaluate(model)
time_dif = get_time_dif(start_time)
print('Done !')
print('prediction usage:',time_dif)
return predict_labels, ids
def softmax(self, score):
score = np.array(score)
score = | np.exp(score) | numpy.exp |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generator to yield resampled volume data for training and validation
"""
# %%
from keras.models import load_model, Model
from matplotlib import pyplot as plt
import numpy as np
import os
from os import path
import random
import SimpleITK as sitk
from stl import mesh
from utils import data_loading_funcs as dlf
from utils import mhd_utils as mu
from utils import reg_evaluator as regev
from utils import volume_resampler_3d as vr
import tensorflow as tf
from utils import registration_reader as rr
import scipy
#from augment_data import augment
# %%
class VolumeDataGenerator(object):
"""Generate volume image for training or validation
#Arguments
"""
def __init__(self,
data_folder,
case_num_range,
case_num_range_2=None,
max_registration_error = 20.0):
self.data_folder = data_folder
cases = []
# Go through all the case
for caseIdx in range(case_num_range[0], case_num_range[1]+1):
caseFolder = 'Case{:04d}'.format(caseIdx)
full_case = path.join(data_folder, caseFolder)
if not path.isdir(full_case):
continue
else:
cases.append(caseIdx)
if case_num_range_2 != None:
for caseIdx in range(case_num_range_2[0], case_num_range_2[1]+1):
caseFolder = 'Case{:04d}'.format(caseIdx)
full_case = path.join(data_folder, caseFolder)
if not path.isdir(full_case):
continue
else:
cases.append(caseIdx)
self.good_cases = np.asarray(cases, dtype=np.int32)
self.num_cases = self.good_cases.size
random.seed()
self.e_t = 0.5
self.e_rot = 1
self.isMultiGauss = False
self.max_error = max_registration_error
print('VolumeDataGenerator: max_registration_error = {}'.format(self.max_error))
#self.width, self.height, self.depth = 96, 96, 32
# ----- #
def get_sample_multi_gauss(self,mean,cov):
return np.random.multivariate_normal(mean,cov)
    def get_num_cases(self):
        # Number of usable cases discovered at construction time.
        return self.num_cases
# ----- #
def _get_random_value(self, r, center, hasSign):
randNumber = random.random() * r + center
if hasSign:
sign = random.random() > 0.5
if sign == False:
randNumber *= -1
return randNumber
# ----- #
def get_array_from_itk_matrix(self, itk_mat):
mat = np.reshape(np.asarray(itk_mat), (3,3))
return mat
# ----- #
def generate(self, shuffle=True, shape=(96,96,96)):
"""
"""
currentIdx = 0
np.random.seed()
(width, height, depth) = shape
print('Shuffle = {}'.format(shuffle))
while True:
idx = currentIdx % self.num_cases
currentIdx += 1
# Shuffle cases
if idx == 0:
if shuffle:
case_array = np.random.permutation(self.good_cases)
else:
case_array = self.good_cases
case_no = case_array[idx]
sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
#sampledFixed, sampledMoving, pos_neg, err, params = self.create_sample(450, shape)
print('Sample generated frome Case{:04d}'.format(case_no))
# Put into 4D array
sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
yield sample4D, err, params
# ----- #
    def generate_batch(self, batch_size=32, shape=(96,96,32)):
        """Infinite batch generator for keras training/validation.

        Yields (batch_samples, params_array) where batch_samples has shape
        (batch, depth, height, width, 2) — channel 0 fixed, channel 1 moving —
        and params_array stacks the per-sample transform parameters.
        """
        batch_index = 0
        np.random.seed()
        (width, height, depth) = shape
        while True:
            # Shuffle cases
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # handle special case where only 1 sample left for the batch
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
            #batch_labels = []
            batch_errors = []
            batch_params = []
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
                # Put into 4D array
                sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
                batch_samples[k, :,:,:,:] = sample4D
                #batch_labels.append(pos_neg)
                batch_errors.append([err])
                batch_params.append(params)
            #yield (batch_samples, [np.asarray(batch_errors), np.asarray(batch_params)])
            yield (batch_samples, np.asarray(batch_params))
            #yield (batch_samples, np.asarray(batch_errors))
    def generate_batch_classification(self, batch_size=32, shape=(96,96,32)):
        """Infinite batch generator for the pairwise-classification network.

        Yields (batch_samples, [labels, errs]); batch_samples has 4 channels:
        initial fixed/moving pair (0,1) and final fixed/moving pair (2,3),
        as produced by ``create_sample_classification``.
        """
        batch_index = 0
        np.random.seed()
        (width, height, depth) = shape
        while True:
            # Shuffle cases
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # handle special case where only 1 sample left for the batch
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            batch_samples = np.zeros((current_batch_size, depth, height, width, 4), dtype=np.ubyte)
            #batch_labels = []
            batch_labels = []
            batch_errs = []
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed_i, sampledFixed_f, sampledMoving_i, sampledMoving_f, label, err1, err2 = self.create_sample_classification(case_no, shape)
                # Put into 4D array
                sample4D = np.zeros((depth, height, width, 4), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed_i)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving_i)
                sample4D[:,:,:,2] = sitk.GetArrayFromImage(sampledFixed_f)
                sample4D[:,:,:,3] = sitk.GetArrayFromImage(sampledMoving_f)
                batch_samples[k, :,:,:,:] = sample4D
                #batch_labels.append(pos_neg)
                batch_labels.append(label)
                batch_errs.append([err1, err2])
            yield (batch_samples, [np.asarray(batch_labels), np.asarray(batch_errs)])
    def generate_batch_NIH(self, batch_size=32, shape=(96,96,32)):
        """Infinite batch generator for the NIH dataset variant.

        Yields (batch_samples, batch_params).  NOTE(review): the errors,
        segmentation meshes, transforms and case numbers are accumulated but
        never yielded — presumably leftovers from an earlier interface;
        confirm before removing.
        """
        batch_index = 0
        np.random.seed()
        (width, height, depth) = shape
        while True:
            # Shuffle cases
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # handle special case where only 1 sample left for the batch
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
            #batch_labels = []
            batch_errors = []
            batch_params = []
            batch_segs = []
            batch_trans = []
            batch_case_nums = []
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed, sampledMoving, err, params, segMesh, trans = self.create_sample_NIH(case_no, shape)
                # Put into 4D array
                sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
                batch_samples[k, :,:,:,:] = sample4D
                #batch_labels.append(pos_neg)
                batch_errors.append([err])
                batch_params.append(params)
                batch_segs.append(segMesh)
                batch_trans.append(trans)
                batch_case_nums.append(case_no)
            yield (batch_samples, batch_params)
    def generate_batch_NIH_transform_prediction(self, batch_size=32, shape=(96,96,32)):
        """Infinite batch generator for transform-parameter regression.

        Yields (batch_samples, batch_transforms) where batch_transforms is a
        list of per-sample transform parameter tuples from ``create_sample``.
        """
        batch_index = 0
        np.random.seed()
        (width, height, depth) = shape
        while True:
            # Shuffle cases
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # handle special case where only 1 sample left for the batch
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
            #batch_labels = []
            batch_transforms = []
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
                # Put into 4D array
                sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
                batch_samples[k, :,:,:,:] = sample4D
                #batch_labels.append(pos_neg)
                batch_transforms.append(params)
                #batch_errors.append([err])
            yield (batch_samples, batch_transforms)
    def generate_batch_NIH_transform_prediction_2D_multiview(self, batch_size=32, shape=(224,222,220)):
        """Infinite batch generator for the 2-D multi-view regression network.

        Extracts ``slice_num`` central axial/sagittal/coronal slices from the
        fixed (MR) and moving (TRUS) volumes and yields
        ([axial, sagittal, coronal], [tX, tY, tZ, rotX, rotY, rotZ, params]).
        Each view has shape (batch, H, W, 2, slice_num): channel 0 MR,
        channel 1 TRUS.
        """
        batch_index = 0
        np.random.seed()
        slice_num = 3
        (width, height, depth) = shape
        while True:
            # Shuffle cases
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # handle special case where only 1 sample left for the batch
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            ax_batch_samples = np.zeros((current_batch_size, height, width, 2, slice_num), dtype=np.ubyte)
            sag_batch_samples = np.zeros((current_batch_size, depth, height, 2, slice_num), dtype=np.ubyte)
            cor_batch_samples = np.zeros((current_batch_size, depth, width, 2, slice_num), dtype=np.ubyte)
            #batch_labels = []
            batch_transforms = []
            ax_transforms = []
            sag_transforms = []
            cor_transforms = []
            batch_errors = []
            batch_segs = []
            batch_affines = []
            batch_tX = []
            batch_tY = []
            batch_tZ = []
            batch_rotX = []
            batch_rotY = []
            batch_rotZ = []
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
                # Put into 4D array
                ax_sample = np.zeros((height, width, 2, slice_num), dtype=np.ubyte)
                sag_sample = np.zeros((depth, height, 2, slice_num), dtype=np.ubyte)
                cor_sample = np.zeros((depth, width, 2, slice_num), dtype=np.ubyte)
                MR = sitk.GetArrayFromImage(sampledFixed)
                TRUS = sitk.GetArrayFromImage(sampledMoving)
                # Central slices per orientation (slice_num slices around the middle).
                ax_sample[:,:,0,:] = np.reshape(MR[int(depth/2)-int((slice_num-1)/2):int(depth/2)+int((slice_num)/2)+1,:,:], (height, width, slice_num))
                ax_sample[:,:,1,:] = np.reshape(TRUS[int(depth/2)-int((slice_num-1)/2):int(depth/2)+int((slice_num)/2)+1,:,:], (height, width, slice_num))
                sag_sample[:,:,0,:] = np.reshape(MR[:,:,int(width/2)-int((slice_num-1)/2):int(width/2)+int((slice_num)/2)+1], (depth, height, slice_num))
                sag_sample[:,:,1,:] = np.reshape(TRUS[:,:,int(width/2)-int((slice_num-1)/2):int(width/2)+int((slice_num)/2)+1], (depth, height, slice_num))
                cor_sample[:,:,0,:] = np.reshape(MR[:,int(height/2)-int((slice_num-1)/2):int(height/2)+int((slice_num)/2)+1,:], (depth, width, slice_num))
                cor_sample[:,:,1,:] = np.reshape(TRUS[:,int(height/2)-int((slice_num-1)/2):int(height/2)+int((slice_num)/2)+1,:], (depth, width, slice_num))
                ax_batch_samples[k, :,:,:,:] = ax_sample
                sag_batch_samples[k, :,:,:,:] = sag_sample
                cor_batch_samples[k, :,:,:,:] = cor_sample
                #batch_labels.append(pos_neg)
                #params = tuple(-1*np.asarray(params))
                batch_transforms.append(params)
                # Per-view parameter subsets (in-plane translations + rotation).
                ax_transforms.append([params[0], params[1], params[5]])
                sag_transforms.append([params[1], params[2], params[3]])
                cor_transforms.append([params[0], params[2], params[4]])
                batch_errors.append([err])
                batch_tX.append(params[0])
                batch_tY.append(params[1])
                batch_tZ.append(params[2])
                batch_rotX.append(params[3])
                batch_rotY.append(params[4])
                batch_rotZ.append(params[5])
                #batch_segs.append(segMesh)
                #batch_affines.append(trans)
            yield ([ax_batch_samples, sag_batch_samples, cor_batch_samples], [np.asarray(batch_tX),np.asarray(batch_tY),np.asarray(batch_tZ),np.asarray(batch_rotX),np.asarray(batch_rotY),np.asarray(batch_rotZ),np.asarray(batch_transforms)])
    def generate_batch_3D_transform_prediction(self, batch_size=32, shape=(96,96,32)):
        """Infinite batch generator for 3-D transform regression (NIH samples).

        Same layout as ``generate_batch_NIH_transform_prediction`` but uses
        ``create_sample_NIH`` (segmentation mesh and affine are discarded).
        """
        batch_index = 0
        np.random.seed()
        (width, height, depth) = shape
        while True:
            # Shuffle cases
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # handle special case where only 1 sample left for the batch
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
            #batch_labels = []
            batch_transforms = []
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed, sampledMoving, err, params, segMesh, trans = self.create_sample_NIH(case_no, shape)
                # Put into 4D array
                sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
                batch_samples[k, :,:,:,:] = sample4D
                #batch_labels.append(pos_neg)
                batch_transforms.append(params)
                #batch_errors.append([err])
            yield (batch_samples, batch_transforms)
def generate_batch_US_regression(self, batch_size=32, shape=(96,96,32)):
"""
"""
batch_index = 0
np.random.seed()
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = np.random.permutation(self.good_cases)
#current_index = (batch_index * batch_size) % self.num_cases
current_index = batch_index * batch_size
if (current_index + batch_size) < self.num_cases:
current_batch_size = batch_size
batch_index += 1
else:
# handle special case where only 1 sample left for the batch
if (self.num_cases - current_index) > 1:
current_batch_size = self.num_cases - current_index
else:
current_batch_size = 2
current_index -= 1
batch_index = 0
batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
#batch_labels = []
batch_params = np.zeros((current_batch_size, 6), dtype=np.float)
for k in range(current_batch_size):
case_no = case_array[k + current_index]
#print(case_no)
sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
# Put into 4D array
sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
batch_samples[k, :,:,:,:] = sample4D
#batch_labels.append(pos_neg)
batch_params[k,:] = params
yield (batch_samples, batch_params)
def generate_batch_US_regression_siamese(self, batch_size=32, shape=(96,96,32)):
"""
"""
batch_index = 0
np.random.seed()
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = np.random.permutation(self.good_cases)
#current_index = (batch_index * batch_size) % self.num_cases
current_index = batch_index * batch_size
if (current_index + batch_size) < self.num_cases:
current_batch_size = batch_size
batch_index += 1
else:
# handle special case where only 1 sample left for the batch
if (self.num_cases - current_index) > 1:
current_batch_size = self.num_cases - current_index
else:
current_batch_size = 2
current_index -= 1
batch_index = 0
batch_samples = np.zeros((current_batch_size, depth, height, width, 1), dtype=np.ubyte)
batch_samples_GT = np.zeros((current_batch_size, depth, height, width, 1), dtype=np.ubyte)
#batch_labels = []
batch_params = np.zeros((current_batch_size, 6), dtype=np.float)
for k in range(current_batch_size):
case_no = case_array[k + current_index]
#print(case_no)
sampledFixed, sampledMoving, sampledMovingGT, err, params = self.create_sample_MRUS2US(case_no, shape)
# Put into 4D array
sample4D = np.zeros((depth, height, width, 1), dtype=np.ubyte)
sample4D_GT = np.zeros((depth, height, width, 1), dtype=np.ubyte)
sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledMoving)
sample4D_GT[:,:,:,0] = sitk.GetArrayFromImage(sampledMovingGT)
batch_samples[k, :,:,:,:] = sample4D
batch_samples_GT[k, :,:,:,:] = sample4D_GT
#batch_labels.append(pos_neg)
batch_params[k,:] = params
yield ([batch_samples, batch_samples_GT], batch_params)
def generate_batch_transformation_regression(self, batch_size=32, shape=(96,96,32)):
"""
"""
batch_index = 0
np.random.seed()
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = np.random.permutation(self.good_cases)
#current_index = (batch_index * batch_size) % self.num_cases
current_index = batch_index * batch_size
if (current_index + batch_size) < self.num_cases:
current_batch_size = batch_size
batch_index += 1
else:
# handle special case where only 1 sample left for the batch
if (self.num_cases - current_index) > 1:
current_batch_size = self.num_cases - current_index
else:
current_batch_size = 2
current_index -= 1
batch_index = 0
batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
#batch_labels = []
batch_params = np.zeros((current_batch_size, 6), dtype=np.float)
for k in range(current_batch_size):
case_no = case_array[k + current_index]
#print(case_no)
sampledFixed, sampledMoving, sampledMovingGT, err, params = self.create_sample_MRUS2US(case_no, shape)
# Put into 4D array
sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledMoving)
sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledFixed)
batch_samples[k, :,:,:,:] = sample4D
#batch_labels.append(pos_neg)
batch_params[k,:] = params
yield (batch_samples, batch_params)
def generate_batch_GAN_AE(self, batch_size=32, shape=(96,96,32), MR_TRUS='MR'):
"""Used for keras training and validation
"""
batch_index = 0
np.random.seed()
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = np.random.permutation(self.good_cases)
#current_index = (batch_index * batch_size) % self.num_cases
current_index = batch_index * batch_size
if (current_index + batch_size) < self.num_cases:
current_batch_size = batch_size
batch_index += 1
else:
# handle special case where only 1 sample left for the batch
if (self.num_cases - current_index) > 1:
current_batch_size = self.num_cases - current_index
else:
current_batch_size = 2
current_index -= 1
batch_index = 0
batch_samples = np.zeros((current_batch_size, depth, height, width, 1), dtype=np.ubyte)
valid = np.ones(current_batch_size,1)
#batch_labels = []
for k in range(current_batch_size):
case_no = case_array[k + current_index]
#print(case_no)
sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
# Put into 4D array
sample4D = np.zeros((depth, height, width, 1), dtype=np.ubyte)
if MR_TRUS == 'MR':
sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
else:
sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledMoving)
batch_samples[k, :,:,:,:] = sample4D
#batch_labels.append(pos_neg)
yield (batch_samples)
    def generate_batch_AIRNet(self, batch_size=32, shape=(96,96,32)):
        """Infinite batch generator for AIRNet-style training.

        Yields (input_pair, target_pair): both are (batch, D, H, W, 2) with
        the fixed image in channel 0; the input's channel 1 is the
        mis-registered moving image, the target's channel 1 is the
        ground-truth (aligned) moving image.
        """
        batch_index = 0
        np.random.seed()
        (width, height, depth) = shape
        while True:
            # Shuffle cases
            if batch_index == 0:
                case_array = np.random.permutation(self.good_cases)
            #current_index = (batch_index * batch_size) % self.num_cases
            current_index = batch_index * batch_size
            if (current_index + batch_size) < self.num_cases:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # handle special case where only 1 sample left for the batch
                if (self.num_cases - current_index) > 1:
                    current_batch_size = self.num_cases - current_index
                else:
                    current_batch_size = 2
                    current_index -= 1
                batch_index = 0
            batch_samples = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
            batch_samples_GT = np.zeros((current_batch_size, depth, height, width, 2), dtype=np.ubyte)
            for k in range(current_batch_size):
                case_no = case_array[k + current_index]
                #print(case_no)
                sampledFixed, sampledMoving, sampledMovingGT, err, params = self.create_sample_MRUS2US(case_no, shape)
                sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMoving)
                batch_samples[k, :,:,:,:] = sample4D
                sample4D = np.zeros((depth, height, width, 2), dtype=np.ubyte)
                sample4D[:,:,:,0] = sitk.GetArrayFromImage(sampledFixed)
                sample4D[:,:,:,1] = sitk.GetArrayFromImage(sampledMovingGT)
                batch_samples_GT[k, :,:,:,:] = sample4D
            yield (batch_samples, batch_samples_GT)
def generate_batch_2D_AEMRax(self, batch_size=32, shape=(96,96,32)):
"""Used for keras training and validation
"""
batch_index = 0
np.random.seed()
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = np.random.permutation(self.good_cases)
#current_index = (batch_index * batch_size) % self.num_cases
current_index = batch_index * batch_size
if (current_index + batch_size) < self.num_cases:
current_batch_size = batch_size
batch_index += 1
else:
# handle special case where only 1 sample left for the batch
if (self.num_cases - current_index) > 1:
current_batch_size = self.num_cases - current_index
else:
current_batch_size = 2
current_index -= 1
batch_index = 0
batch_samples = np.zeros((current_batch_size, height, width, 1), dtype=np.ubyte)
for k in range(current_batch_size):
case_no = case_array[k + current_index]
#print(case_no)
sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
sample4D = np.zeros((height, width, 1), dtype=np.ubyte)
sample4D[:,:,0] = sitk.GetArrayFromImage(sampledFixed)[random.randint(0,sitk.GetArrayFromImage(sampledFixed).shape[0]-1)]
batch_samples[k,:,:,:] = sample4D
yield (batch_samples, batch_samples)
def generate_batch_2D_AEUSax(self, batch_size=32, shape=(96,96,32)):
"""Used for keras training and validation
"""
batch_index = 0
np.random.seed()
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = np.random.permutation(self.good_cases)
#current_index = (batch_index * batch_size) % self.num_cases
current_index = batch_index * batch_size
if (current_index + batch_size) < self.num_cases:
current_batch_size = batch_size
batch_index += 1
else:
# handle special case where only 1 sample left for the batch
if (self.num_cases - current_index) > 1:
current_batch_size = self.num_cases - current_index
else:
current_batch_size = 2
current_index -= 1
batch_index = 0
batch_samples = np.zeros((current_batch_size, height, width, 1), dtype=np.ubyte)
for k in range(current_batch_size):
case_no = case_array[k + current_index]
#print(case_no)
sampledFixed, sampledMoving, err, params = self.create_sample(case_no, shape)
sample4D = np.zeros((height, width, 1), dtype=np.ubyte)
sample4D[:,:,0] = sitk.GetArrayFromImage(sampledMoving)[random.randint(0,sitk.GetArrayFromImage(sampledMoving).shape[0]-1)]
batch_samples[k,:,:,:] = sample4D
yield (batch_samples, batch_samples)
def generate_batch_2D_MRUS_recon(self, batch_size=32, shape=(96,96,32)):
"""Used for keras training and validation
"""
batch_index = 0
np.random.seed()
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = np.random.permutation(self.good_cases)
#current_index = (batch_index * batch_size) % self.num_cases
current_index = batch_index * batch_size
if (current_index + batch_size) < self.num_cases:
current_batch_size = batch_size
batch_index += 1
else:
# handle special case where only 1 sample left for the batch
if (self.num_cases - current_index) > 1:
current_batch_size = self.num_cases - current_index
else:
current_batch_size = 2
current_index -= 1
batch_index = 0
batch_samples = np.zeros((current_batch_size, height, width, 2), dtype=np.ubyte)
for k in range(current_batch_size):
case_no = case_array[k + current_index]
#print(case_no)
sampledFixed, sampledMoving, sampledMovingGT, err, params = self.create_sample_MRUS2US(case_no, shape)
MR = sitk.GetArrayFromImage(sampledFixed)
US = sitk.GetArrayFromImage(sampledMovingGT)
idx = random.randint(0,MR.shape[0]-1)
MR_ax = MR[idx]
US_ax = US[idx]
for i in range(US_ax.shape[0]):
for j in range(US_ax.shape[1]):
if US_ax[i][j] == 0:
MR_ax[i][j] = 0
sample4D = np.zeros((height, width, 2), dtype=np.ubyte)
sample4D[:,:,0] = MR_ax
sample4D[:,:,1] = US_ax
batch_samples[k,:,:,:] = sample4D
yield (np.reshape(batch_samples[:,:,:,0],(current_batch_size,height,width,1)), [np.reshape(batch_samples[:,:,:,0],(current_batch_size,height,width,1)), np.reshape(batch_samples[:,:,:,1],(current_batch_size,height,width,1))])
def generate_batch_2D_MRUSax(self, batch_size=32, shape=(96,96,32)):
"""Used for keras training and validation
"""
batch_index = 0
np.random.seed()
(width, height, depth) = shape
while True:
# Shuffle cases
if batch_index == 0:
case_array = | np.random.permutation(self.good_cases) | numpy.random.permutation |
# -*- coding: iso-8859-1 -*-
"""
Create files (from Rugheimer metadata) that give the atmospheric profile, i.e. mixing ratio, temperature and pressure as a function of altitude.
Since the Rugheimer T/P and mixing ratio files are generated from different codes, they have different abscissa, and so different files are generated for them. Interpolation is used in our code to match the two files.
"""
import numpy as np
import pdb
import matplotlib.pyplot as plt
import scipy.stats
from scipy import interpolate as interp
import cookbook
def extract_profiles_primitive_earth_rugheimer():
    """
    Form spectra, mixing-ratio files, and T/P profiles for the revised Rugheimer Epoch 0 (3.9 Ga) Earth models. This is to triangulate the sources of our differences.

    Reads the Rugheimer photochemistry output (outchem_Ep0_A0.2_Frac1.0.dat) and writes three files:
      1. ./LiteratureSpectra/rugheimer_epoch0_recomputed_A0.2.dat  -- rebinned TOA flux and 3.9 Ga BOA intensity
      2. ./MixingRatios/rugheimer_earth_epoch0_recomputed_A0.2_mixingratios_v2.dat -- gas mixing ratios vs altitude
      3. ./TPProfiles/rugheimer_earth_epoch0_recomputed_A0.2_atmosphereprofile.dat -- T, n, P vs altitude

    NOTE(review): every skip_header/skip_footer value below is hand-matched to the
    exact line layout of the metadata files; they will break silently if those
    files change. Verify against the raw files before reusing.
    """
    #####Zeroth: set value of constants, specify filenames
    import cookbook
    filename='./Raw_Data/Rugheimer_Metadata/outchem_Ep0_A0.2_Frac1.0.dat'
    bar2Ba=1.0e6 #1 bar in Ba
    k=1.3806488e-16 #Boltzmann Constant in erg/K
    #####First, form the spectra for comparison.
    importeddata=np.genfromtxt(filename, skip_header=290, skip_footer=1277)
    #Remove the first wavelength bin which corresponds to Lyman Alpha and which does not have a bin width that fits with its neighbors.
    rugheimer_wav_centers=importeddata[1:,1]/10. #Convert wavelengths from Angstroms to nm
    rugheimer_s=importeddata[1:,4] #ratio of 4piJ(surf)/I_0
    rugheimer_s[19]=3.16548e-128 #one element of rugheimer_s has value 3.16548e-128. Python has trouble with this and imports as a NaN. Here, we manually set its value.
    ###Form wavelength bins from Rugheimer wavelength centers
    rugheimer_wav_bin_leftedges=np.zeros(len(rugheimer_wav_centers))
    rugheimer_wav_bin_rightedges=np.zeros(len(rugheimer_wav_centers))
    #First ten FUV fluxes are 5 nm (50 A) bins (email from <EMAIL>, 3/12/2015)
    rugheimer_wav_bin_leftedges[0:9]=rugheimer_wav_centers[0:9]-2.5
    rugheimer_wav_bin_rightedges[0:9]=rugheimer_wav_centers[0:9]+2.5
    #Remainder of FUV fluxes are taken from a file that sarah sent me (<EMAIL>, 3/12/2015)
    del importeddata
    importeddata=np.genfromtxt('./Raw_Data/Rugheimer_Metadata/Active_M9_Teff2300_photo.pdat', skip_header=1, skip_footer=0)
    rugheimer_wav_bin_leftedges[9:]=importeddata[:,2]*0.1 #convert A to nm
    rugheimer_wav_bin_rightedges[9:]=importeddata[:,3]*0.1 #convert A to nm
    ####Check that bins are correct:
    ###print np.sum(rugheimer_wav_centers-0.5*(rugheimer_wav_bin_leftedges+rugheimer_wav_bin_rightedges)) #0 to within 1e-12 rounding error.
    ###Rebin Claire et al input.
    #Import 0.01-nm resolution Claire et al 3.9 Ga Sun model.
    del importeddata
    importeddata=np.genfromtxt('./Raw_Data/Claire_Model/claire_youngsun_highres.dat', skip_header=1, skip_footer=0)
    claire_wav=importeddata[:,0] #nm, 0.01 nm resolution
    claire_fluxes=importeddata[:,1]#erg/s/cm2/nm
    #Bin Claire et al model to resolution of Rugheimer model
    #Simple mean of all high-res samples falling inside each Rugheimer bin.
    claire_fluxes_rebinned=np.zeros(len(rugheimer_wav_centers))
    claire_wav_rebinned=np.zeros(len(claire_fluxes_rebinned))#This should be redundant with rugheimer_wav_centers. We include it as a check statistic that the rebinning is proceeding appropriately.
    for ind in range(0, len(rugheimer_wav_centers)):
        min_wav=rugheimer_wav_bin_leftedges[ind]
        max_wav=rugheimer_wav_bin_rightedges[ind]
        inds=(claire_wav >= min_wav) & (claire_wav <= max_wav)
        claire_fluxes_rebinned[ind]=np.mean(claire_fluxes[inds])
        claire_wav_rebinned[ind]=np.mean(claire_wav[inds]) #check statistic.
    ###print np.sum((claire_wav_rebinned-rugheimer_wav_centers)/rugheimer_wav_centers) #check statistic. Good to within 1e-5 in all cases. Any problems caused by slight misalignment from 0.01 due to rounding error. Good enough.
    ###Compute bottom-of-atmosphere actinic flux, which is what is reported in Rugheimer+2015.
    rugheimer_ground_energies=claire_fluxes_rebinned*rugheimer_s
    #Let's print out the results
    spectable=np.zeros([len(rugheimer_wav_bin_leftedges),5])
    spectable[:,0]=rugheimer_wav_bin_leftedges
    spectable[:,1]=rugheimer_wav_bin_rightedges
    spectable[:,2]=rugheimer_wav_centers
    spectable[:,3]=claire_fluxes_rebinned
    spectable[:,4]=rugheimer_ground_energies
    header='Left Bin Edge (nm)	Right Bin Edge (nm)	Bin Center (nm)	Solar Flux at Earth (erg/s/nm/cm2)	3.9 Ga BOA Intensity (erg/s/nm/cm2)\n'
    f=open('./LiteratureSpectra/rugheimer_epoch0_recomputed_A0.2.dat', 'w')
    f.write(header)
    np.savetxt(f, spectable, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Second, form the mixing ratio files
    #Each block below is a different species table inside the same output file.
    importeddata1=np.genfromtxt(filename, skip_header=779, skip_footer=873) #O2, O3, H2O
    importeddata2=np.genfromtxt(filename, skip_header=837, skip_footer=817) #CH4, SO2
    importeddata4=np.genfromtxt(filename, skip_header=958, skip_footer=704) #N2, CO2
    #Let's print out the results. We have established that the z values are the same, so can use a common block
    printtable=np.zeros([np.shape(importeddata1)[0],9])
    printtable[:,0]=importeddata1[:,0] #altitude in cm
    #N2 and CO2: We use the values from this block rather than block 1 because rugheimer et al force it to these values in their code, regardless of what the photochemistry code wants to do.
    printtable[:,1]=importeddata4[:,2] #N2.
    printtable[:,2]=importeddata4[:,1] #CO2
    #The rest are normal
    printtable[:,3]=importeddata1[:,3] #H2O
    printtable[:,4]=importeddata2[:,2] #CH4
    printtable[:,5]=importeddata2[:,9] #SO2
    printtable[:,6]=importeddata1[:,2] #O2
    printtable[:,7]=importeddata1[:,8] #O3
    #printtable[:,8]# H2S; left as zeros since not included in Rugheimer model
    #print np.sum(printtable[:,1:],1)
    #pdb.set_trace()
    header0='Extracted from Rugheimer outchem_Ep0_A0.2_Frac1.0.dat\n'
    header1='Z (cm)	N2	CO2	H2O	CH4	SO2	O2	O3	H2S \n'
    f=open('./MixingRatios/rugheimer_earth_epoch0_recomputed_A0.2_mixingratios_v2.dat', 'w')
    f.write(header0)
    f.write(header1)
    np.savetxt(f, printtable, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Third, form the T/P profiles
    #Extract temperature and pressure profile from climate model output
    #For whatever reason the very last line of the table is doubled. We remove this.
    importeddata=np.genfromtxt(filename, skip_header=1568, skip_footer=104)
    model_z=importeddata[:-1,0] #altitude in cm
    model_t=importeddata[:-1,1] #temperature in K
    model_n=importeddata[:-1,3] #number density in cm**-3.
    model_p=importeddata[:-1,4] #pressure, in bar (based on text in draft manuscript sent to me by <NAME>)
    #Let's print out the results
    printtable=np.zeros([len(model_z)+1,4])
    printtable[1:,0]=model_z
    printtable[1:,1]=model_t
    printtable[1:,2]=model_n
    printtable[1:,3]=model_p
    #Rugheimer data file does not explicitly include t, P, n at z=0 (Surface). Our code requires z=0 data. To reconcile, we include these data manually as follows:
    printtable[0,0]=0. #z=0 case
    printtable[0,3]=1. #In the paper, p=1.0 bar at surface is specified
    printtable[0,1]=292.95 #From linear extrapolation from z=0.5 km and z=1.5 km points
    printtable[0,2]= 1.*bar2Ba/(k*292.95)#Compute number density self-consistently from temperature, pressure via Ideal Gas Law as is done elsewhere (n [cm**-3] = p [Barye]/(k*T [K])
    header0='Extracted from Rugheimer outchem_Ep0_A0.2_Frac1.0.dat\n'
    header1='Z (cm)	T (K)	DEN (cm**-3)	P (bar) \n'
    f=open('./TPProfiles/rugheimer_earth_epoch0_recomputed_A0.2_atmosphereprofile.dat', 'w')
    f.write(header0)
    f.write(header1)
    np.savetxt(f, printtable, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
#extract_profiles_primitive_earth_rugheimer()
def extract_profiles_modern_earth_rugheimer():
    """
    Form spectra, mixing-ratio files, and T/P profiles for the Rugheimer+2014 modern Earth surface UV models. This is a test case.

    Reads the coupled climate/photochemistry output (output_couple_Sun_100.dat)
    and writes:
      1. ./LiteratureSpectra/rugheimer_earth_modern.dat -- TOA flux and BOA actinic flux
      2. ./MixingRatios/rugheimer_earth_modern_mixingratios_v2.dat -- gas mixing ratios vs altitude
      3. ./TPProfiles/rugheimer_earth_modern_atmosphereprofile.dat -- T, n, P vs altitude

    NOTE(review): the skip_header/skip_footer values are hand-matched to the exact
    layout of the metadata file and will break silently if it changes.
    """
    #####Zeroth: set value of constants, specify filenames
    import cookbook
    filename='./Raw_Data/Rugheimer_Metadata/output_couple_Sun_100.dat'
    bar2Ba=1.0e6 #1 bar in Ba
    k=1.3806488e-16 #Boltzmann Constant in erg/K
    #####First, form the spectra for comparison.
    #Extract spectra from Rugheimer file
    importeddata=np.genfromtxt(filename, skip_header=286, skip_footer=102)
    #Remove the first wavelength bin which corresponds to Lyman Alpha and which does not have a bin width that fits with its neighbors.
    spec_wav=importeddata[1:,0]*0.1 #A to nm
    spec_top=importeddata[1:,1]*1.e3 #W/m^2/nm to erg/cm^2/s/nm
    spec_gnd=importeddata[1:,2]*1.e3 #W/m^2/nm to erg/cm^2/s/nm
    #two elements of the file are not importing correctly (underflow-scale exponents), set them manually here
    spec_gnd[23]=2.92059e-121*1.e3
    spec_gnd[24]=1.57780e-102 *1.e3
    #Next, extract the edges of the spectral bins.
    bin_left_edges=np.zeros(np.shape(spec_wav))
    bin_right_edges=np.zeros(np.shape(spec_wav))
    #first 9 bins are 5-nm (50 angstrom) wide bins (See faruv_sun.pdat)
    bin_left_edges[0:9]=spec_wav[0:9]-2.5
    bin_right_edges[0:9]=spec_wav[0:9]+2.5
    #The edges for the rest of the bins can be taken from G2V_photo.pdat:
    importeddata=np.genfromtxt('./Raw_Data/Rugheimer_Metadata/G2V_photo.pdat', skip_header=1, skip_footer=0)
    bin_left_edges[9:]=importeddata[:,2]*0.1 #convert from A to nm
    bin_right_edges[9:]=importeddata[:,3]*0.1 #convert from A to nm
    ###let's validate our bin edges by computing the bin centers and making sure the residuals aren't too high
    ##diff=(0.5*(bin_left_edges+bin_right_edges)-spec_wav)#/spec_wav
    ##print diff
    ##print np.max(np.abs(diff))
    ###this test shows very slight offsets, at the 0.05 nm level at maximum. Should not affect results given bins are >1nm in width.
    #Let's print out the results
    printtable=np.zeros([len(bin_left_edges),5])
    printtable[:,0]=bin_left_edges
    printtable[:,1]=bin_right_edges
    printtable[:,2]=spec_wav
    printtable[:,3]=spec_top
    printtable[:,4]=spec_gnd
    header='Left Bin Edge (nm)	Right Bin Edge (nm)	Bin Center (nm)	TOA Flux (erg/s/nm/cm2)	BOA Actinic Flux (erg/s/nm/cm2) \n'
    f=open('./LiteratureSpectra/rugheimer_earth_modern.dat', 'w')
    f.write(header)
    np.savetxt(f, printtable, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Second, form the mixing ratio files
    importeddata1=np.genfromtxt(filename, skip_header=78, skip_footer=323) #water, methane
    importeddata2=np.genfromtxt(filename, skip_header=182, skip_footer=222) #ozone, must derive from number density
    #Let's print out the results. We have established that the z values are the same, so can use a common block
    printtable=np.zeros([np.shape(importeddata1)[0],9])
    printtable[:,0]=importeddata1[:,0]*1.e5 #altitude in cm (converted from km)
    #N2 O2, and CO2: Well-mixed (constant mixing ratio at every altitude)
    #H2O, CH4, O3: tracked through atmosphere
    #SO2: Not tracked. Assume 0.
    printtable[:,1]=printtable[:,1]+ 0.78#N2; level tuned to assure 1 bar of surface pressure. Earth mean value given here.
    printtable[:,2]=printtable[:,2]+355.e-6 #CO2; level directly quoted in paper
    printtable[:,3]=importeddata1[:,2] #H2O
    printtable[:,4]=importeddata1[:,4] #CH4
    #printtable[:,5]=printtable[:,5] #SO2; left as zeros since not included in the model
    printtable[:,6]=printtable[:,6]+0.21 #O2; level directly quoted in paper
    printtable[:,7]=importeddata2[:,4]/importeddata2[:,2]#O3 mixing ratio = O3 number density / total number density
    #printtable[:,8]=printtable[:,8]# H2S; left as zeros since not included in the model
    header0='Extracted from Rugheimer output_couple_Sun_100.dat\n'
    header1='Z (cm)	N2	CO2	H2O	CH4	SO2	O2	O3	H2S\n'
    f=open('./MixingRatios/rugheimer_earth_modern_mixingratios_v2.dat', 'w')
    f.write(header0)
    f.write(header1)
    np.savetxt(f, printtable, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    ####Third, form the T/P profiles
    N_A=6.022e23 #Avogadro's number
    bar2Ba=1.0e6 #1 bar in Ba
    atm2bar=1.01325 #1 atm in bar
    k=83.14472/N_A #Boltzman constant in bar*cm^3/K, converted from bar*cm^3/(K*mol) (from http://www.engineeringtoolbox.com/individual-universal-gas-constant-d_588.html)
    #Extract temperature and pressure profile from climate model output
    #[::-1] reverses the table so altitude runs from low to high.
    importeddata=np.genfromtxt(filename, skip_header=409, skip_footer=0)
    model_z=importeddata[::-1,1]*1.e5 #altitude in cm, converted from km
    model_t=importeddata[::-1,2] #temperature in K
    model_p=importeddata[::-1,0]*atm2bar #pressure, in bar, converted from atm.
    model_n=model_p/(model_t*k) #number density in cm**-3, computed from ideal gas law.
    #Let's print out the results
    printtable=np.zeros([len(model_z),4])
    printtable[:,0]=model_z
    printtable[:,1]=model_t
    printtable[:,2]=model_n
    printtable[:,3]=model_p
    header0='Extracted from Rugheimer output_couple_Sun_100.dat\n'
    header1='Z (cm)	T (K)	DEN (cm**-3)	P (bar) \n'
    f=open('./TPProfiles/rugheimer_earth_modern_atmosphereprofile.dat', 'w')
    f.write(header0)
    f.write(header1)
    np.savetxt(f, printtable, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
#extract_profiles_modern_earth_rugheimer()
def form_profiles_wuttke():
    """
    Form the feedstock files to replicate the Wuttke+2006 Antarctic diffuse radiance measurements.

    Writes:
      1. ./LiteratureSpectra/wuttke2006.dat -- rebinned TOA flux and measured diffuse flux
      2. ./MixingRatios/wuttke2006_mixingratios_v2.dat -- Rugheimer modern-Earth mixing
         ratios with H2O scaled down x10 and O3 scaled up x1.25
      3. ./TPProfiles/wuttke2006_atmosphereprofile.dat -- copy of the Rugheimer modern-Earth T/P profile
    """
    import cookbook
    #First, form the spectral file.
    #Define spectral bins. 0.25 nm from 280-500 nm, 1 nm from 500-1000 nm. We just go to 900 since that's what our data is good to. Also we start at 292.75 because that's where our graphclicked data starts
    bin_left_edges=np.concatenate((np.arange(292.75,500.,0.25),np.arange(500., 900.,1.)))
    bin_right_edges=np.concatenate((np.arange(293.,500.25,0.25),np.arange(501., 901.,1.)))
    bin_centers=0.5*(bin_left_edges+bin_right_edges)
    #load BOA diffuse zenith flux from Wuttke+2006 (extracted via GraphClick)
    importeddata=np.genfromtxt('./Raw_Data/UV_Surface_Measurements/wuttke.csv', skip_header=0, skip_footer=0, delimiter=',')
    dif_wav=importeddata[:,0] #nm
    dif_flux=importeddata[:,1]*2.*np.pi #mW/m2/nm/sr=erg/s/cm2/nm/sr; multiply by 2pi to convert to hemisphere-integrated total surface diffuse radiances
    #Linearly interpolate the digitized measurement onto our bin centers.
    dif_func=interp.interp1d(dif_wav, dif_flux, kind='linear')
    dif_flux_interp=dif_func(bin_centers)
    #load solar spectrum from Claire et al (2012) models, normalized to 1 au
    importeddata=np.genfromtxt('./Raw_Data/Claire_Model/claire_modernsun_highres.dat', skip_header=1, skip_footer=0)
    claire_wav=importeddata[:,0] #nm, 0.1 nm resolution, 100-900 nm.
    claire_fluxes=importeddata[:,1]#erg/s/cm2/nm
    #rebin claire spectrum
    claire_fluxes_rebinned=cookbook.rebin_uneven(np.arange(99.995,900.005,0.01), np.arange(100.005, 900.015,0.01),claire_fluxes,bin_left_edges, bin_right_edges)
    #Plot to make sure rebinning worked correctly (visual sanity check only)
    fig, ax1=plt.subplots(1, figsize=(6,4))
    ax1.plot(claire_wav, claire_fluxes, marker='s', color='black', label='Claire Fluxes')
    ax1.plot(bin_centers, claire_fluxes_rebinned, marker='s', color='blue', label='Binned Claire Fluxes')
    ax1.set_yscale('log')
    ax1.set_ylim([1.e-2, 1.e4])
    ax1.set_xlim([280.,900.])
    ax1.set_xlabel('nm')
    ax1.set_ylabel('erg/s/cm2/nm')
    ax1.legend(loc=0)
    plt.show()
    #Let's print out the results
    spectable=np.zeros([len(bin_left_edges),5])
    spectable[:,0]=bin_left_edges
    spectable[:,1]=bin_right_edges
    spectable[:,2]=bin_centers
    spectable[:,3]=claire_fluxes_rebinned
    spectable[:,4]=dif_flux_interp
    header='Left Bin Edge (nm)	Right Bin Edge (nm)	Bin Center (nm)	Top of Atm Flux (erg/s/nm/cm2)	Zenith Diffuse Flux (erg/s/nm/cm2)\n'
    f=open('./LiteratureSpectra/wuttke2006.dat', 'w')
    f.write(header)
    np.savetxt(f, spectable, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Second, form the mixing ratio files
    #####Form by replicating the Rugheimer modern Earth profile, then scaling down the H2O level and scaling up the O3 level.
    mixingratios=np.genfromtxt('./MixingRatios/rugheimer_earth_modern_mixingratios_v2.dat', skip_header=2, skip_footer=0)
    mixingratios[:,3]=mixingratios[:,3]*0.1 #scale down h2o by factor of 10
    mixingratios[:,7]=mixingratios[:,7]*1.25 #scale up ozone by factor of 1.25
    header0='Based on Rugheimer+2013 Modern Earth Model\n'
    header1='Z (cm)	N2	CO2	H2O	CH4	SO2	O2	O3	H2S\n'
    f=open('./MixingRatios/wuttke2006_mixingratios_v2.dat', 'w')
    f.write(header0)
    f.write(header1)
    np.savetxt(f, mixingratios, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Finally, form TP profile
    #####Form by duplicating Rugheimer+2013 modern Earth profile
    tpprofile=np.genfromtxt('./TPProfiles/rugheimer_earth_modern_atmosphereprofile.dat', skip_header=2, skip_footer=0)
    header0='Based on Rugheimer+2013 Modern Earth Model\n'
    header1='Z (cm)	T (K)	DEN (cm**-3)	P (bar) \n'
    f=open('./TPProfiles/wuttke2006_atmosphereprofile.dat', 'w')
    f.write(header0)
    f.write(header1)
    np.savetxt(f, tpprofile, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
#form_profiles_wuttke()
def form_profiles_woudc():
    """
    Form the feedstock files to replicate the irradiance measurements from the WOUDC website for Toronto (June 21 2003, SZA=20.376, O3=354, Brewer no. 145).

    Writes:
      1. ./LiteratureSpectra/woudc.dat -- rebinned TOA flux and measured surface flux
      2. ./MixingRatios/woudc_mixingratios_v2.dat -- Rugheimer modern-Earth mixing
         ratios with O3 scaled up x1.77 to match the measured ozone column
      3. ./TPProfiles/woudc_atmosphereprofile.dat -- copy of the Rugheimer modern-Earth T/P profile
    """
    ########First, form the spectral file.
    #load measured irradiances
    importeddata=np.genfromtxt('./Raw_Data/UV_Surface_Measurements/woudc_toronto_2003_145_cut.dat', skip_header=1, skip_footer=0, delimiter='	')
    woudc_wav=importeddata[:,0] #nm
    woudc_flux=importeddata[:,1]*1.e3 #W/m2/nm=1000 erg/s/cm2/nm
    #woudc_func=interp.interp1d(woudc_wav, woudc_flux, kind='linear')
    #woudc_flux_interp=dif_func(bin_centers)
    #Define spectral bins: 0.5 nm bins centered on the measurement wavelengths.
    bin_centers=woudc_wav
    bin_left_edges=woudc_wav-0.25
    bin_right_edges=woudc_wav+0.25
    #load solar spectrum from Claire et al (2012) models, normalized to 1 au
    #Fix: use the project-relative path (as in form_profiles_wuttke) instead of
    #a machine-specific absolute path, so the script runs on any checkout.
    importeddata2=np.genfromtxt('./Raw_Data/Claire_Model/claire_modernsun_highres.dat', skip_header=1, skip_footer=0)
    claire_wav=importeddata2[:,0] #nm, 0.1 nm resolution, 100-900 nm.
    claire_fluxes=importeddata2[:,1]#erg/s/cm2/nm
    #rebin claire spectrum
    claire_fluxes_rebinned=cookbook.rebin_uneven(np.arange(99.995,900.005,0.01), np.arange(100.005, 900.015,0.01),claire_fluxes,bin_left_edges, bin_right_edges)
    #Plot to make sure rebinning worked correctly (visual sanity check only)
    fig, ax1=plt.subplots(1, figsize=(6,4))
    ax1.plot(claire_wav, claire_fluxes, marker='s', color='black', label='Claire Fluxes')
    ax1.plot(bin_centers, claire_fluxes_rebinned, marker='s', color='blue', label='Binned Claire Fluxes')
    ax1.set_yscale('log')
    ax1.set_ylim([1.e-2, 1.e4])
    ax1.set_xlim([280.,360.])
    ax1.set_xlabel('nm')
    ax1.set_ylabel('erg/s/cm2/nm')
    ax1.legend(loc=0)
    plt.show()
    #Let's print out the results
    spectable=np.zeros([len(bin_left_edges),5])
    spectable[:,0]=bin_left_edges
    spectable[:,1]=bin_right_edges
    spectable[:,2]=bin_centers
    spectable[:,3]=claire_fluxes_rebinned
    spectable[:,4]=woudc_flux
    header='Left Bin Edge (nm)	Right Bin Edge (nm)	Bin Center (nm)	Top of Atm Flux (erg/s/nm/cm2)	Surface Flux (erg/s/nm/cm2)\n'
    f=open('./LiteratureSpectra/woudc.dat', 'w')
    f.write(header)
    np.savetxt(f, spectable, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Second, form the mixing ratio files
    #####Form by replicating the Rugheimer modern Earth profile, then scaling up the O3 level.
    mixingratios=np.genfromtxt('./MixingRatios/rugheimer_earth_modern_mixingratios_v2.dat', skip_header=2, skip_footer=0)
    mixingratios[:,7]=mixingratios[:,7]*1.77 #scale up ozone by factor of 1.77 (comment previously said 1.25, which did not match the code)
    header0='Based on Rugheimer+2013 Modern Earth Model\n'
    header1='Z (cm)	N2	CO2	H2O	CH4	SO2	O2	O3	H2S\n'
    f=open('./MixingRatios/woudc_mixingratios_v2.dat', 'w')
    f.write(header0)
    f.write(header1)
    np.savetxt(f, mixingratios, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
    ###########################################################################################
    ###########################################################################################
    ###########################################################################################
    #####Finally, form TP profile
    #####Form by duplicating Rugheimer+2013 modern Earth profile
    tpprofile=np.genfromtxt('./TPProfiles/rugheimer_earth_modern_atmosphereprofile.dat', skip_header=2, skip_footer=0)
    header0='Based on Rugheimer+2013 Modern Earth Model\n'
    header1='Z (cm)	T (K)	DEN (cm**-3)	P (bar) \n'
    f=open('./TPProfiles/woudc_atmosphereprofile.dat', 'w')
    f.write(header0)
    f.write(header1)
    np.savetxt(f, tpprofile, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
#form_profiles_woudc()
def form_spectral_feedstock_ourwork():
    """
    Form the spectral feedstock file to explore formally the dependence of UV surface intensity on various factors. The mixing ratio and TP profiles vary in each case though.

    Writes ./LiteratureSpectra/general_spectral_input.dat with 1-nm bins from
    100-500 nm, the rebinned Claire 3.9 Ga Sun TOA intensity, and the
    Rugheimer+2015 base-case (A=0.2, SZA=60) surface intensity as a reference.
    """
    import cookbook
    #Extract spectra to match and TOA intensity
    #Define spectral bins.
    bin_left_edges=np.arange(100.,500.,1.)
    bin_right_edges=np.arange(101.,501.,1.)
    bin_centers=0.5*(bin_left_edges+bin_right_edges)
    #There are no literature intensity values for this file, since at this point we are not comparing against any other datasets but are rather running our code internally. However, we can use the Rugheimer et al base case (60 degrees, 0.2) as a reference
    literature_intensities=np.zeros(np.shape(bin_centers))
    importeddata=np.genfromtxt('./TwoStreamOutput/AlbZen/rugheimer_earth_epoch0_a=0.2_z=60.dat', skip_header=1, skip_footer=0)
    basecase_wav=importeddata[:,2] #nm,
    basecase_surface_intensities=importeddata[:,6] #erg/s/cm2/nm
    #load solar spectrum from Claire et al (2012) models, normalized to 1 au. These are really TOA intensities. Multiply by mu_0 to get TOA fluxes.
    importeddata=np.genfromtxt('./Raw_Data/Claire_Model/claire_youngsun_highres.dat', skip_header=1, skip_footer=0)
    claire_wav=importeddata[:,0] #nm, 0.01 nm resolution, 100-900 nm.
    claire_fluxes=importeddata[:,1]#erg/s/cm2/nm
    #rebin claire spectrum
    claire_fluxes_rebinned=cookbook.rebin_uneven(np.arange(99.995,900.005,0.01), np.arange(100.005, 900.015,0.01),claire_fluxes,bin_left_edges, bin_right_edges)
    #Plot to make sure rebinning worked correctly (visual sanity check only)
    fig, ax1=plt.subplots(1, figsize=(6,4))
    ax1.plot(claire_wav, claire_fluxes, marker='s', color='black', label='Claire Fluxes')
    ax1.plot(bin_centers, claire_fluxes_rebinned, marker='s', color='blue', label='Binned Claire Fluxes')
    ax1.set_yscale('log')
    ax1.set_ylim([1.e-2, 1.e4])
    ax1.set_xlim([100.,500.])
    ax1.set_xlabel('nm')
    ax1.set_ylabel('erg/s/cm2/nm')
    ax1.legend(loc=0)
    plt.show()
    #Let's print out the results
    spectable=np.zeros([len(bin_left_edges),5])
    spectable[:,0]=bin_left_edges
    spectable[:,1]=bin_right_edges
    spectable[:,2]=bin_centers
    spectable[:,3]=claire_fluxes_rebinned
    spectable[:,4]=basecase_surface_intensities
    header='Left Bin Edge (nm)	Right Bin Edge (nm)	Bin Center (nm)	Top of Atm Intensity (erg/s/nm/cm2)	3.9 Ga R+2015 Surface Intensity (erg/s/nm/cm2)\n'
    f=open('./LiteratureSpectra/general_spectral_input.dat', 'w')
    f.write(header)
    np.savetxt(f, spectable, delimiter='		', fmt='%1.7e', newline='\n')
    f.close()
#form_spectral_feedstock_ourwork()
def form_profiles_co2limtests():
"""
Purpose of this code is to form mixing ratio and T/P profile for our exploration of the surface environment on the 3.9 Ga Earth for a range of two-component atmospheres of CO2 and N2. N2 abundance is always fixed at 0.9 bar equivalent for consistency with Rugheimer et al (2015), while CO2 abundance varies. We derive these by reading in the values for the Rugheimer (2015) atmosphere, which is at 1 bar, and scaling it.
"""
k=1.38064852e-16 #Boltzman constant in erg/K
bar2Ba=1.0e6 #1 bar in Ba
multiples=np.array([0., 1.e-6, 1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.33, 46.6, 470., .6, 8.93e-3]) #values we will be scaling the CO2 column by
####################
####Mixing ratios:
####################
importeddata1= | np.genfromtxt('./MixingRatios/rugheimer_earth_epoch0_recomputed_A0.2_mixingratios_v2.dat', skip_header=2, skip_footer=0) | numpy.genfromtxt |
import sys
import numpy as np
import scipy.integrate
import scipy.special
from ._dblquad import dblquad
# Optional dependency probe: prefer the GSL bindings for quadrature when
# installed; the code falls back to scipy.integrate otherwise.
HAVE_PYGSL = False
try:
    import pygsl.integrate
    import pygsl.sf
    HAVE_PYGSL = True
except ImportError:
    pass
class BinEB(object):
def __init__(
self, tmin, tmax, Nb, windows=None, linear=False, useArcmin=True, fname=None
):
if fname is not None:
self.read_data(fname)
else:
# set basic params
if useArcmin:
am2r = np.pi / 180.0 / 60.0
else:
am2r = 1.0
self.Nb = Nb
self.L = tmin * am2r
self.H = tmax * am2r
if linear:
self.Lb = (self.H - self.L) / Nb * np.arange(Nb) + self.L
self.Hb = (self.H - self.L) / Nb * (np.arange(Nb) + 1.0) + self.L
else:
self.Lb = np.exp(np.log(self.H / self.L) / Nb * np.arange(Nb)) * self.L
self.Hb = (
np.exp(np.log(self.H / self.L) / Nb * (np.arange(Nb) + 1.0))
* self.L
)
self.have_ell_win = False
# make the bin window functions
if windows is None:
def _make_geomwin(L, H):
return lambda x: 2.0 * x / (H * H - L * L)
self.windows = []
for i in range(self.Nb):
self.windows.append(_make_geomwin(self.Lb[i], self.Hb[i]))
else:
def _make_normwin(winf, norm):
return lambda x: winf(x / am2r) / norm
self.windows = []
assert (
len(windows) == Nb
), "binEB requires as many windows as angular bins!"
for i in range(self.Nb):
twin = _make_normwin(windows[i], 1.0)
norm, err = scipy.integrate.quad(twin, self.Lb[i], self.Hb[i])
self.windows.append(_make_normwin(windows[i], norm))
# get fa and fb
self.fa = np.zeros(self.Nb)
self.fa[:] = 1.0
if HAVE_PYGSL:
limit = 10
epsabs = 1e-8
epsrel = 1e-8
w = pygsl.integrate.workspace(limit)
def fb_int(x, args):
win = args[0]
return win(x) * x * x
self.fb = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(fb_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.fb[i] = val
else:
def fb_int(x, win):
return win(x) * x * x
self.fb = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
fb_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
self.fb[i] = val
self.fa_on = self.fa / np.sqrt(np.sum(self.fa * self.fa))
self.fb_on = self.fb - self.fa * np.sum(self.fa * self.fb) / np.sum(
self.fa * self.fa
)
self.fb_on = self.fb_on / np.sqrt(np.sum(self.fb_on * self.fb_on))
# get Mplus matrix
if HAVE_PYGSL:
limit = 10
epsabs = 1e-8
epsrel = 1e-8
w = pygsl.integrate.workspace(limit)
def knorm_int(x, args):
win = args[0]
return win(x) * win(x) / x
knorm = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(knorm_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
knorm[i] = val
self.invnorm = knorm
def inv2_int(x, args):
win = args[0]
return win(x) / x / x
inv2 = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(inv2_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
inv2[i] = val
def inv4_int(x, args):
win = args[0]
return win(x) / x / x / x / x
inv4 = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(inv4_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
inv4[i] = val
else:
def knorm_int(x, win):
return win(x) * win(x) / x
knorm = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
knorm_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
knorm[i] = val
self.invnorm = knorm
def inv2_int(x, win):
return win(x) / x / x
inv2 = | np.zeros(self.Nb) | numpy.zeros |
import torch.utils.data as data
import pickle
import PIL
import numpy as np
import torch
import os
import math, random
import os.path
import sys
import cv2
import skimage
from skimage.transform import rotate
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
WALL_THREAHOD = 5e-2
GC_THRESHOLD = 1e-1
VIZ = False
def decompose_rotation(R):
    """Extract [roll, pitch, yaw] Euler angles from a 3x3 rotation matrix R.

    Standard ZYX-style extraction: pitch from -R[2,0] against the norm of the
    first column's top two entries, then roll and yaw from the remaining
    entries scaled by 1/cos(pitch).
    """
    pitch = math.atan2(-R[2, 0], math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2))
    inv_cp = 1.0 / math.cos(pitch)
    roll = math.atan2(R[2, 1] * inv_cp, R[2, 2] * inv_cp)
    yaw = math.atan2(R[1, 0] * inv_cp, R[0, 0] * inv_cp)
    return [roll, pitch, yaw]
def decompose_up_n(up_n):
    """Recover (roll, pitch) angles from an up-vector `up_n` (3 components).

    Inverts pitch from -asin of the first component, then roll from the
    second component rescaled by 1/cos(pitch).
    """
    ux, uy = up_n[0], up_n[1]
    pitch = -math.asin(ux)
    roll = math.asin(uy / math.cos(pitch))
    return roll, pitch
def get_xy_vector_from_rp(roll, pitch):
    """Build the rotated x/y axis direction vectors for given roll and pitch.

    Returns (rx, ry) as length-3 numpy arrays.
    """
    cos_r, sin_r = math.cos(roll), math.sin(roll)
    cos_p, sin_p = math.cos(pitch), math.sin(pitch)
    rx = np.array((cos_p * cos_r, 0.0, sin_p))
    ry = np.array((0.0, cos_r, -sin_r))
    return rx, ry
def make_dataset(list_name):
    """Read an image-list file and return its lines (trailing newlines kept).

    Args:
        list_name: path to a text file with one dataset entry per line.

    Returns:
        list[str]: every line of the file, including trailing newlines.
    """
    # Context manager guarantees the handle is closed even if readlines raises
    # (the original opened/closed manually and leaked the handle on error).
    with open(list_name, "r") as text_file:
        return text_file.readlines()
def read_array(path):
    """Read a COLMAP-style binary array file.

    The file starts with an ASCII header "width&height&channels&" followed
    immediately by raw little-endian float32 data in column-major order.
    Returns the array transposed to (height, width, channels), squeezed.
    """
    with open(path, "rb") as fid:
        # Parse the ASCII dimensions from the first "line" of the file.
        width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
                                                usecols=(0, 1, 2), dtype=int)
        # Rewind, then advance past the third '&' so the offset sits at the
        # start of the raw float payload.
        fid.seek(0)
        seen = 0
        while seen < 3:
            if fid.read(1) == b"&":
                seen += 1
        data = np.fromfile(fid, np.float32)
    data = data.reshape((width, height, channels), order="F")
    return np.transpose(data, (1, 0, 2)).squeeze()
def skew(x):
    """Return the 3x3 skew-symmetric (cross-product) matrix of 3-vector x,
    i.e. skew(x) @ v == np.cross(x, v)."""
    a, b, c = x[0], x[1], x[2]
    return np.array([[0, -c, b],
                     [c, 0, -a],
                     [-b, a, 0]])
class InteriorNetRyFolder(data.Dataset):
def __init__(self, opt, list_path, is_train):
img_list = make_dataset(list_path)
if len(img_list) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.list_path = list_path
self.img_list = img_list
self.opt = opt
self.input_width = 384
self.input_height = 288
self.is_train = is_train
self.rot_range = 10
self.reshape = False
self.lr_threshold = 4.
self.fx = 600.
self.fy = 600.
def load_imgs(self, img_path, normal_path, rot_path):
img = cv2.imread(img_path)
img = img[:, :, ::-1]
normal = (np.float32(cv2.imread(normal_path, -1))/65535. * 2.0) - 1.0
normal = normal[:, :, ::-1]
h, w, c = normal.shape
cam_normal = normal[:, :w//2, :]
global_normal = normal[:, w//2:, :]
mask = np.float32(np.linalg.norm(cam_normal, axis=-1) > 0.9) * np.float32(np.linalg.norm(cam_normal, axis=-1) < 1.1)
R_g_c = np.identity(3)
with open(rot_path, 'r') as f:
rot_row = f.readlines()
for i in range(3):
r1, r2, r3 = rot_row[i].split()
R_g_c[i, :] = np.array((np.float32(r1), np.float32(r2), np.float32(r3)))
return {'img': img,
'cam_normal':cam_normal,
'global_normal': global_normal,
'mask':mask,
'R_g_c': R_g_c}
def resize_imgs(self, train_data, resized_width, resized_height):
train_data['img'] = cv2.resize(train_data['img'],
(resized_width, resized_height),
interpolation=cv2.INTER_AREA)
train_data['cam_normal'] = cv2.resize(train_data['cam_normal'],
(resized_width, resized_height),
interpolation=cv2.INTER_NEAREST)
train_data['global_normal'] = cv2.resize(train_data['global_normal'],
(resized_width, resized_height),
interpolation=cv2.INTER_NEAREST)
train_data['mask'] = cv2.resize(train_data['mask'],
(resized_width, resized_height),
interpolation=cv2.INTER_NEAREST)
return train_data
def crop_imgs(self, train_data, start_x, start_y, crop_w, crop_h):
train_data['img'] = train_data['img'][start_y:start_y+crop_h,
start_x:start_x+crop_w, :]
train_data['cam_normal'] = train_data['cam_normal'][start_y:start_y+crop_h,
start_x:start_x+crop_w, :]
train_data['global_normal'] = train_data['global_normal'][start_y:start_y+crop_h,
start_x:start_x+crop_w, :]
train_data['mask'] = train_data['mask'][start_y:start_y+crop_h,
start_x:start_x+crop_w]
return train_data
def load_precomputed_crop_hw(self, normal_path):
crop_hw_path = normal_path.replace('normal_pair', 'precomputed_crop_hw')[:-4] + '.txt'
with open(crop_hw_path, 'r') as f:
crop_hw = f.readlines()
crop_h, crop_w = crop_hw[0].split()
return int(crop_h), int(crop_w)
def rotate_normal(self, R, normal):
normal_rot = np.dot(R, np.reshape(normal, (-1, 3)).T)
normal_rot = np.reshape(normal_rot.T, (normal.shape[0], normal.shape[1], 3))
normal_rot = normal_rot/(np.maximum(np.linalg.norm(normal_rot, axis=2, keepdims=True), 1e-8))
normal_rot = np.clip(normal_rot, -1.0, 1.0)
return normal_rot
    def create_geo_ry(self, cam_normal, global_normal, R_gc):
        """Build per-pixel geometry frames (normal, up, tangent).

        Args:
            cam_normal: (H, W, 3) normal map in camera coordinates.
            global_normal: (H, W, 3) normal map in the gravity-aligned frame
                -- assumption from the call site, which passes the rotated
                upright normal; TODO confirm.
            R_gc: 3x3 rotation; used transposed here, so presumably maps
                camera -> global -- verify against callers.

        Returns:
            cam_geo: (H, W, 9) camera-frame normal, up and tangent vectors.
            global_geo: (H, W, 3) normalised z-components of the three
                global-frame vectors.
        """
        # pixels whose upright normal has small z-magnitude count as walls.
        # NOTE(review): WALL_THREAHOD (sic) is defined elsewhere in the file.
        wall_mask = np.abs(global_normal[:, :, 2]) < WALL_THREAHOD #* mask
        upright_u_y = global_normal[:, :, 0].copy()
        upright_u_z = global_normal[:, :, 2].copy()
        # on wall pixels force a fixed seed for the up-direction
        upright_u_z[wall_mask] = 0.0
        upright_u_y[wall_mask] = 1.0
        # build and normalise the global "up" field, then rotate it into the
        # camera frame
        global_u_unit = np.stack((-upright_u_z, np.zeros_like(upright_u_z), upright_u_y), axis=2)
        global_u_unit = global_u_unit/(np.maximum(np.linalg.norm(global_u_unit, axis=2, keepdims=True), 1e-8))
        cam_u_unit = self.rotate_normal(R_gc.T, global_u_unit)
        # tangent = up x normal, normalised, rotated into the camera frame
        global_t_unit = np.cross(global_u_unit, global_normal)
        global_t_unit = global_t_unit/(np.maximum(np.linalg.norm(global_t_unit, axis=2, keepdims=True), 1e-8))
        cam_t_unit = self.rotate_normal(R_gc.T, global_t_unit)
        cam_geo = np.concatenate((cam_normal, cam_u_unit, cam_t_unit), axis=2)
        # the target keeps only the z components of the three global vectors
        global_geo = np.concatenate((global_normal[:, :, 2:3], global_u_unit[:, :, 2:3], global_t_unit[:, :, 2:3]), axis=2)
        global_geo = global_geo/(np.maximum(np.linalg.norm(global_geo, axis=2, keepdims=True), 1e-8))
        return cam_geo, global_geo#, np.float32(wall_mask)
    def create_geo_rz(self, cam_normal, global_normal, R_gc):
        """Variant of create_geo_ry seeded around the z axis: the tangent
        field is built in the horizontal plane, with a fixed fallback on
        near-horizontal (floor/ceiling) pixels.

        Args/returns mirror create_geo_ry.
        """
        # pixels whose global normal is nearly vertical (|z| close to 1).
        # NOTE(review): GC_THRESHOLD is defined elsewhere in the file.
        gc_mask = np.abs(np.abs(global_normal[:, :, 2]) - 1.0) < GC_THRESHOLD #* mask
        global_t_x = global_normal[:, :, 0].copy()
        global_t_y = global_normal[:, :, 1].copy()
        # on floor/ceiling pixels force a fixed seed direction
        global_t_x[gc_mask] = -1.0
        global_t_y[gc_mask] = 0.0
        # horizontal tangent perpendicular to the normal's xy projection
        global_t_unit = np.stack( (-global_t_y, global_t_x, np.zeros_like(global_t_x)), axis=2)
        global_t_unit = global_t_unit/(np.maximum(np.linalg.norm(global_t_unit, axis=2, keepdims=True), 1e-8))
        cam_t_unit = self.rotate_normal(R_gc.T, global_t_unit)
        # up = normal x tangent, normalised, rotated into the camera frame
        global_u_unit = np.cross(global_normal, global_t_unit)
        global_u_unit = global_u_unit/(np.maximum(np.linalg.norm(global_u_unit, axis=2, keepdims=True), 1e-8))
        cam_u_unit = self.rotate_normal(R_gc.T, global_u_unit)
        cam_geo = np.concatenate((cam_normal, cam_u_unit, cam_t_unit), axis=2)
        # the target keeps only the z components of the three global vectors
        global_geo = np.concatenate((global_normal[:, :, 2:3],
                                     global_u_unit[:, :, 2:3],
                                     global_t_unit[:, :, 2:3]), axis=2)
        global_geo = global_geo/(np.maximum(np.linalg.norm(global_geo, axis=2, keepdims=True), 1e-8))
        return cam_geo, global_geo#, np.float32(wall_mask)
    def __getitem__(self, index):
        """Load, crop, resize and package one training/eval sample.

        Returns a (float image tensor, targets dict) pair; the targets hold
        the camera/upright geometry, the validity mask, the ground-truth
        rotation/roll-pitch and the (crop-adjusted) focal lengths.
        """
        targets_1 = {}
        # derive the RGB and rotation paths from the normal-map path
        normal_path = self.img_list[index].rstrip()#.split()
        img_path = normal_path.replace('normal_pair', 'rgb') #+ '.png'
        rot_path = normal_path.replace('normal_pair', 'gt_poses')[:-4] + '.txt'
        train_data = self.load_imgs(img_path, normal_path, rot_path)
        original_h, original_w = train_data['img'].shape[0], train_data['img'].shape[1]
        if self.is_train:
            # random aspect-preserving crop, then resize to the net input
            crop_h = random.randint(380, original_h)
            crop_w = int(round(crop_h*float(original_w)/float(original_h)))
            start_y = random.randint(0, original_h - crop_h)
            start_x = random.randint(0, original_w - crop_w)
            train_data = self.crop_imgs(train_data,
                                        start_x, start_y,
                                        crop_w, crop_h)
            train_data = self.resize_imgs(train_data,
                                          self.input_width,
                                          self.input_height)
        else:
            # deterministic centred crop using precomputed sizes
            crop_h, crop_w = self.load_precomputed_crop_hw(normal_path)
            start_y = int((original_h - crop_h)/2)
            start_x = int((original_w - crop_w)/2)
            train_data = self.crop_imgs(train_data, start_x, start_y, crop_w, crop_h)
            train_data = self.resize_imgs(train_data, self.input_width, self.input_height)
        # scale the intrinsics by the resize ratio of the cropped image
        ratio_x = float(train_data['img'].shape[1])/float(crop_w)
        ratio_y = float(train_data['img'].shape[0])/float(crop_h)
        fx = self.fx * ratio_x
        fy = self.fy * ratio_y
        img_1 = np.float32(train_data['img'])/255.0
        cam_normal = train_data['cam_normal']
        R_g_c = train_data['R_g_c']
        global_normal = train_data['global_normal']
        # rotate the camera normals into the gravity-aligned frame
        upright_normal = self.rotate_normal(R_g_c, cam_normal)
        mask = train_data['mask']
        gt_up_vector = R_g_c[2, :]
        [gt_roll, gt_pitch, gt_yaw]= decompose_rotation(R_g_c)
        gt_rp = np.array([gt_roll, gt_pitch])
        cam_geo, upright_geo = self.create_geo_ry(cam_normal, upright_normal, R_g_c)
        if VIZ:
            # debug-only visualisation branch; note it exits the process
            save_img_name = 'imgs/' + normal_path.split('/')[-3] + '_' + normal_path.split('/')[-1]
            save_img_name = save_img_name[:-4] + '.jpg'
            skimage.io.imsave(save_img_name, img_1)
            save_n_name = save_img_name[:-4] + '_n.jpg'
            cam_n_rgb = (cam_geo[:, :, 0:3] + 1.0)/2.
            skimage.io.imsave(save_n_name, cam_n_rgb)
            save_u_name = save_img_name[:-4] + '_u.jpg'
            cam_u_rgb = (cam_geo[:, :, 3:6] + 1.0)/2.
            skimage.io.imsave(save_u_name, cam_u_rgb)
            save_t_name = save_img_name[:-4] + '_t.jpg'
            cam_t_rgb = (cam_geo[:, :, 6:9] + 1.0)/2.
            skimage.io.imsave(save_t_name, cam_t_rgb)
            upright_v_name = save_img_name[:-4] + '_v.jpg'
            upright_geo_rgb = (upright_geo + 1.0)/2.
            skimage.io.imsave(upright_v_name, upright_geo_rgb)
            print('%s from rotation matrix: roll %f, pitch %f, yaw %f'%(img_path, math.degrees(gt_roll), math.degrees(gt_pitch), math.degrees(gt_yaw)))
            plt.figure(figsize=(12, 6))
            plt.subplot(2,4,1)
            plt.imshow(img_1)
            plt.subplot(2,4,5)
            plt.imshow(mask, cmap='gray')
            plt.subplot(2,4,2)
            plt.imshow((cam_geo[:, :, 0:3] + 1.0)/2.)
            plt.subplot(2,4,6)
            plt.imshow((upright_geo[:, :, 0]+1.)/2.0, cmap='gray')
            plt.subplot(2,4,3)
            plt.imshow((cam_geo[:, :, 3:6] + 1.0)/2.)
            plt.subplot(2,4,7)
            plt.imshow( (upright_geo[:, :, 1]+1.0)/2., cmap='gray')
            plt.subplot(2,4,4)
            plt.imshow((cam_geo[:, :, 6:9] + 1.0)/2.)
            plt.subplot(2,4,8)
            plt.imshow( (upright_geo+1.0)/2.)
            plt.savefig(normal_path.split('/')[-3] + '_' + normal_path.split('/')[-1])
            # combine_gt = np.concatenate((img_1, (cam_nu[:, :, 0:3] + 1.0)/2., (upright_nu[:, :, 0:3]+1.)/2.0, (cam_nu[:, :, 3:6] + 1.0)/2., (cam_nu[:, :, 3:6] + 1.0)/2.), axis=1)
            # save_img = np.uint16( np.clip(np.round(65535.0 * combine_gt), 0., 65535.))
            # cv2.imwrite(normal_path.split('/')[-3] + '_' + normal_path.split('/')[-1], save_img[:, :, ::-1])
            print('InteriorNet train we are good')
            sys.exit()
        # HWC -> CHW float tensors for the network
        final_img = torch.from_numpy(np.ascontiguousarray(img_1).transpose(2,0,1)).contiguous().float()
        targets_1['cam_geo'] = torch.from_numpy(np.ascontiguousarray(cam_geo).transpose(2,0,1)).contiguous().float()
        targets_1['upright_geo'] = torch.from_numpy(np.ascontiguousarray(upright_geo).transpose(2,0,1)).contiguous().float()
        targets_1['gt_mask'] = torch.from_numpy(np.ascontiguousarray(mask)).contiguous().float()
        targets_1['gt_rp'] = torch.from_numpy(np.ascontiguousarray(gt_rp)).contiguous().float()
        targets_1['R_g_c'] = torch.from_numpy(np.ascontiguousarray(R_g_c)).contiguous().float()
        targets_1['gt_up_vector'] = torch.from_numpy(np.ascontiguousarray(gt_up_vector)).contiguous().float()
        targets_1['img_path'] = img_path
        targets_1['normal_path'] = normal_path
        targets_1['fx'] = fx
        targets_1['fy'] = fy
        return final_img, targets_1
    def __len__(self):
        # dataset size = number of entries in the file list
        return len(self.img_list)
# class SUN360Folder(data.Dataset):
# def __init__(self, opt, list_path, is_train):
# img_list = make_dataset(list_path)
# if len(img_list) == 0:
# raise(RuntimeError("Found 0 images in: " + root + "\n"
# "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
# # self.img_dir = img_dir
# self.list_path = list_path
# self.img_list = img_list
# self.opt = opt
# self.input_width = 384
# self.input_height = 288
# self.is_train = is_train
# self.brightness_factor = 0.1
# self.contrast_factor = 0.1
# self.saturation_factor = 0.1
# self.rot_range = 20
# self.reshape = False
# self.lr_threshold = 4.
# def load_imgs(self, img_path, rot_path):
# img = cv2.imread(img_path)
# try:
# img = img[:,:,::-1]
# except:
# print(img_path)
# sys.exit()
# R_g_c = np.identity(3)
# R_g_c = np.identity(3)
# with open(rot_path, 'r') as f:
# rot_row = f.readlines()
# for i in range(3):
# r1, r2, r3 = rot_row[i].split()
# R_g_c[i, :] = np.array((np.float32(r1), np.float32(r2), np.float32(r3)))
# return {'img': img,
# 'R_g_c': R_g_c}
# def rotate_imgs(self, train_data, random_angle):
# # first rotate input image
# # then compute rotation matrix to transform camera normal
# R_r_c = np.identity(3)
# random_radius = - random_angle/180.0 * math.pi
# R_r_c[0,0] = math.cos(random_radius)
# R_r_c[0,2] = math.sin(random_radius)
# R_r_c[2,0] = -math.sin(random_radius)
# R_r_c[2,2] = math.cos(random_radius)
# cam_normal_rot = np.dot(R_r_c, np.reshape(train_data['cam_normal'], (-1, 3)).T)
# cam_normal_rot = np.reshape(cam_normal_rot.T, (train_data['cam_normal'].shape[0], train_data['cam_normal'].shape[1], 3))
# train_data['R_g_c'] = np.dot(train_data['R_g_c'], R_r_c.T)
# resize = False
# train_data['img'] = rotate(train_data['img'], random_angle, order=1, resize=resize)
# train_data['cam_normal'] = rotate(cam_normal_rot, random_angle, order=0, resize=resize)
# train_data['upright_normal'] = rotate(train_data['upright_normal'], random_angle, order=0, resize=resize)
# train_data['mask'] = rotate(train_data['mask'], random_angle, order=0, resize=resize)
# return train_data
# def resize_imgs(self, train_data, resized_width, resized_height):
# train_data['img'] = cv2.resize(train_data['img'], (resized_width, resized_height), interpolation=cv2.INTER_AREA)
# return train_data
# def crop_imgs(self, train_data, start_x, start_y, crop_w, crop_h):
# train_data['img'] = train_data['img'][start_y:start_y+crop_h, start_x:start_x+crop_w, :]
# return train_data
# def load_intrinsic(self, intrinsic_path):
# intrinsic = np.identity(3)
# with open(intrinsic_path, 'r') as f:
# rot_row = f.readlines()
# for i in range(3):
# r1, r2, r3 = rot_row[i].split()
# intrinsic[i, :] = np.array((np.float32(r1), np.float32(r2), np.float32(r3)))
# return intrinsic[0, 0]/2.0, intrinsic[1, 1]/2.0
# def __getitem__(self, index):
# targets_1 = {}
# img_path = self.img_list[index].rstrip()#.split()
# poses_path = img_path.replace('rgb/', 'poses/').replace('.png', '_true_camera_rotation.txt')
# intrinsic_path = img_path.replace('rgb/', 'intrinsic/').replace('.png', '_true_camera_intrinsic.txt')
# train_data = self.load_imgs(img_path, poses_path)
# original_h, original_w = train_data['img'].shape[0], train_data['img'].shape[1]
# fx_o, fy_o = self.load_intrinsic(intrinsic_path)
# train_data = self.resize_imgs(train_data, self.input_width, self.input_height)
# ratio_x = float(train_data['img'].shape[1])/float(original_w)
# ratio_y = float(train_data['img'].shape[0])/float(original_h)
# fx = fx_o * ratio_x
# fy = fy_o * ratio_y
# img_h, img_w = train_data['img'].shape[0], train_data['img'].shape[1]
# img_1 = np.float32(train_data['img'])/255.0
# mask = np.float32(np.mean(img_1, -1) > 1e-4)
# R_g_c = train_data['R_g_c']
# [gt_roll, gt_pitch, gt_yaw]= decompose_rotation(R_g_c)
# gt_vfov = 2 * math.atan(float(img_h)/(2*fy))
# gt_up_vector = R_g_c[2, :]
# gt_rp = np.array([gt_roll, gt_pitch])
# if VIZ:
# hl_left, hl_right = getHorizonLineFromAngles(gt_pitch, gt_roll, gt_vfov, img_h, img_w)
# slope = np.arctan(hl_right - hl_left)
# midpoint = (hl_left + hl_right) / 2.0
# offset = (midpoint - 0.5) / np.sqrt( 1 + (hl_right - hl_left)**2 )
# slope_idx = np.clip(np.digitize(slope, slope_bins), 0, len(slope_bins)-1)
# offset_idx = np.clip(np.digitize(offset, offset_bins), 0, len(offset_bins)-1)
# print('%s roll %f, pitch %f, yaw %f vfov %f'%(img_path, math.degrees(gt_roll), math.degrees(gt_pitch), math.degrees(gt_yaw), math.degrees(gt_vfov)))
# plt.figure(figsize=(10, 6))
# plt.subplot(2,1,1)
# plt.imshow(img_1)
# plt.subplot(2,1,2)
# plt.imshow(mask, cmap='gray')
# # plt.subplot(2,2,3)
# # plt.imshow((cam_normal+1.)/2.0)
# # plt.subplot(2,2,4)
# # plt.imshow((upright_normal+1.)/2.0)
# plt.savefig(img_path.split('/')[-1])
# print('train we are good MP')
# sys.exit()
# final_img = torch.from_numpy(np.ascontiguousarray(img_1).transpose(2,0,1)).contiguous().float()
# targets_1['gt_mask'] = torch.from_numpy(np.ascontiguousarray(mask)).contiguous().float()
# targets_1['R_g_c'] = torch.from_numpy(np.ascontiguousarray(R_g_c)).contiguous().float()
# targets_1['gt_rp'] = torch.from_numpy(np.ascontiguousarray(gt_rp)).contiguous().float()
# targets_1['gt_up_vector'] = torch.from_numpy(np.ascontiguousarray(gt_up_vector)).contiguous().float()
# targets_1['fx'] = torch.from_numpy(np.ascontiguousarray(fx)).contiguous().float()
# targets_1['fy'] = torch.from_numpy(np.ascontiguousarray(fy)).contiguous().float()
# targets_1['img_path'] = img_path
# return final_img, targets_1
# def __len__(self):
# return len(self.img_list)
class ScanNetFolder(data.Dataset):
def __init__(self, opt, list_path, is_train):
img_list = make_dataset(list_path)
if len(img_list) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.list_path = list_path
self.img_list = img_list
self.opt = opt
self.input_width = 384
self.input_height = 288
self.is_train = is_train
self.rot_range = 10
self.reshape = False
self.lr_threshold = 4.
def load_imgs(self, img_path, normal_path, rot_path, intrinsic_path):
img = cv2.imread(img_path)
img = img[:, :, ::-1]
normal = (np.float32(cv2.imread(normal_path, -1))/65535. * 2.0) - 1.0
cam_normal = normal[:, :, ::-1]
mask = np.float32(np.linalg.norm(cam_normal, axis=-1) > 0.9) * np.float32(np.linalg.norm(cam_normal, axis=-1) < 1.1) #* np.float32(np.max(img,-1) > 1e-3)
R_g_c = | np.identity(3) | numpy.identity |
import numpy as np
import AnalyticGeometryFunctions as ag
class SheepPositionReset():
    """Sample a start position for the sheep: the configured initial
    position plus per-dimension uniform noise with a random sign."""
    def __init__(self, initSheepPosition, initSheepPositionNoise):
        self.initSheepPosition = initSheepPosition
        self.initSheepPositionNoiseLow, self.initSheepPositionNoiseHigh = initSheepPositionNoise
    def __call__(self):
        low = self.initSheepPositionNoiseLow
        high = self.initSheepPositionNoiseHigh
        # independent magnitude and sign per dimension
        offsets = [np.random.uniform(low, high) * np.random.choice([-1, 1])
                   for _ in range(len(self.initSheepPosition))]
        return self.initSheepPosition + np.array(offsets)
class WolfPositionReset():
    """Sample a start position for the wolf: the configured initial
    position plus per-dimension uniform noise with a random sign."""
    def __init__(self, initWolfPosition, initWolfPositionNoise):
        self.initWolfPosition = initWolfPosition
        self.initWolfPositionNoiseLow, self.initWolfPositionNoiseHigh = initWolfPositionNoise
    def __call__(self):
        low = self.initWolfPositionNoiseLow
        high = self.initWolfPositionNoiseHigh
        # independent magnitude and sign per dimension
        offsets = [np.random.uniform(low, high) * np.random.choice([-1, 1])
                   for _ in range(len(self.initWolfPosition))]
        return self.initWolfPosition + np.array(offsets)
class SheepPositionTransition():
    """Advance the sheep one step: its action is used directly as a
    velocity and the result is adjusted by the boundary checker."""
    def __init__(self, nDimOneAgentPhysicalState, positionIndex, checkBoundaryAndAdjust):
        self.nDimOneAgentPhysicalState = nDimOneAgentPhysicalState
        self.positionIndex = positionIndex
        self.checkBoundaryAndAdjust = checkBoundaryAndAdjust
    def __call__(self, oldAllAgentState, sheepId, sheepAction):
        dim = self.nDimOneAgentPhysicalState
        # slice this agent's sub-state out of the flat state vector
        sheepState = oldAllAgentState[dim * sheepId: dim * (sheepId + 1)]
        lo, hi = min(self.positionIndex), max(self.positionIndex)
        position = sheepState[lo: hi + 1]
        # the action is the velocity for this step
        moved = position + np.array(sheepAction)
        adjusted, _ = self.checkBoundaryAndAdjust(moved)
        return adjusted
class WolfPositionTransition():
    """Advance the wolf one step: it moves at a fixed speed straight
    towards the sheep (heat-seeking), then the position is adjusted by the
    boundary checker."""
    def __init__(self, nDimOneAgentPhysicalState, positionIndex, checkBoundaryAndAdjust, wolfSpeed):
        self.nDimOneAgentPhysicalState = nDimOneAgentPhysicalState
        self.positionIndex = positionIndex
        self.checkBoundaryAndAdjust = checkBoundaryAndAdjust
        self.wolfSpeed = wolfSpeed
    def __call__(self, oldAllAgentState, wolfId, sheepId):
        dim = self.nDimOneAgentPhysicalState
        lo, hi = min(self.positionIndex), max(self.positionIndex)
        sheepPosition = oldAllAgentState[dim * sheepId: dim * (sheepId + 1)][lo: hi + 1]
        wolfPosition = oldAllAgentState[dim * wolfId: dim * (wolfId + 1)][lo: hi + 1]
        # unit vector pointing from wolf to sheep
        offset = sheepPosition - wolfPosition
        heatSeekingDirection = offset / np.sqrt(np.sum(np.power(offset, 2)))
        moved = wolfPosition + self.wolfSpeed * heatSeekingDirection
        adjusted, _ = self.checkBoundaryAndAdjust(moved)
        return adjusted
class CheckBoundaryAndAdjust():
    """Reflect a position back inside the rectangular arena.

    Calling an instance returns the (possibly reflected) position together
    with a distance vector derived from the boundaries, matching the
    2-tuple that Sheep/WolfPositionTransition unpack.
    """
    def __init__(self, xBoundary, yBoundary):
        self.xBoundary = xBoundary
        self.yBoundary = yBoundary
        self.xMin, self.xMax = xBoundary
        self.yMin, self.yMax = yBoundary
    def __call__(self, position):
        # mirror each coordinate off whichever wall it crossed
        if position[0] >= self.xMax:
            position[0] = 2 * self.xMax - position[0]
        if position[0] <= self.xMin:
            position[0] = 2 * self.xMin - position[0]
        if position[1] >= self.yMax:
            position[1] = 2 * self.yMax - position[1]
        if position[1] <= self.yMin:
            position[1] = 2 * self.yMin - position[1]
        # NOTE(review): the original final line was corrupted in the source
        # ("toWallDistance = | np.concatenate([...]) |"); it is restored
        # exactly as it appeared, with the 2-tuple return that the callers
        # in this file unpack as (position, toWallDistance).
        toWallDistance = np.concatenate([position[0] - self.xBoundary,
                                         position[1] - self.yBoundary,
                                         self.xBoundary, self.yBoundary])
        return position, toWallDistance
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
magnetCalibration.py
Author: <NAME>, <NAME>
Last Edited: 06.12.2018
Python Version: 3.6.5
Script to read the magnetic field generated in relation to the voltage applied
from the DAQ measurement card. Generated Calibration File.
Structure of this module:
1) Imports
2) Global Variables
3) Directory and File System Management
4) Initialize Hardware and Measurement Variables
5) Plot Result
6) Main Function
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~ 1 ) Imports ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# General Imports
import os
import shutil
import sys
import time as t
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import serial
import nidaqmx
from nidaqmx.constants import AcquisitionType, TaskMode, Slope, \
DigitalWidthUnits
# Imports of own modules
from GaussmeterCommunication import Gaussmeter
from NI_CardCommunication_V2 import NI_CardCommunication
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~ 2) Global Variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
Measurementpoints = 10
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~ 3) Directory and File System Management ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def cleanUp():
    """
    Method for managing the file and directory structure. If an older
    calibration is found in the directory "Calibration", it gets moved to the
    folder of "OldCalibrations" and is saved with the date of the movement.
    This ensures that the newest calibration is always found in the
    "Calibration" Folder.

    Side effects: creates directories, moves files, and finally changes the
    process working directory into "Calibration".
    """
    # Definition of directory names
    sourceFolder = "Calibration"
    destinationFolder = "OldCalibrations"
    # Creates new directories if they dont exist already
    if not os.path.exists(sourceFolder):
        os.makedirs(sourceFolder)
    if not os.path.exists(destinationFolder):
        os.makedirs(destinationFolder)
    # checks if there is content in the Calibration folder
    # that needs to be moved
    # move content
    if len(os.listdir(sourceFolder)) != 0:
        # archive under a timestamped subfolder so runs never collide
        timeStamp = createTimeStamp()
        if not os.path.exists(destinationFolder + "\\" + timeStamp):
            os.makedirs(destinationFolder + "\\" + timeStamp)
        listOfFiles = os.listdir(sourceFolder)
        for file in listOfFiles:
            # NOTE: backslash separators make this Windows-only
            shutil.move(sourceFolder + "\\" + file,
                        destinationFolder + "\\" + timeStamp + "\\")
    # NOTE(review): changes the CWD as a side effect; later relative paths
    # in this script rely on it
    os.chdir(sourceFolder)
def createTimeStamp():
    """Return the current local time formatted as ``YYYYMMDD_HHMMSS``.

    Used to name the archive folders for old calibration files.
    """
    # strftime already returns a str, so no extra conversion is needed
    now = datetime.now()
    return now.strftime("%Y%m%d_%H%M%S")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~ 4) Initialize Hardware and Measurement Variables ~~~~~~~~~~~~~~~~~~~~~~~ #
def initializeWritingTask():
    """
    Initialize NI DAQ Measurement Task

    :return: NIDAQ Card Object, Task
    """
    card = NI_CardCommunication()
    # reset the device before creating a fresh analog-output task
    card.reset_device("Dev1")
    ao_task = card.create_Task_ao0("Dev1/ao0")
    return card, ao_task
def initializeGaussmeter():
    """
    Initialize Communication with Gaussmeter

    :return: Gaussmeter Object with an open connection
    """
    meter = Gaussmeter()
    meter.openCommunication()
    return meter
def createMeasurementArray():
    """
    Create the voltage sweep for the calibration: ramp up from 0 V to
    +Amplitude, run a full hysteresis loop down to -Amplitude and back,
    then ramp back down to 0 V.

    NOTE(review): the tail of this function was corrupted in the source
    ("Array = | np.concatenate([...]) |"); the concatenation is restored
    exactly as written and, per the docstring, an empty result list is
    returned alongside it -- confirm against the original.

    :return: Array : Measurement Parameters (voltage set points)
    :return: resultList: List for entries
    """
    Amplitude = 5.1
    step = 0.05
    startArray = np.arange(0, Amplitude+step, step)
    loopArray1 = np.arange(Amplitude+step, -1*(Amplitude+step), -1*step)
    loopArray2 = np.arange(-1*(Amplitude+step), Amplitude+step, step)
    loopArray = np.concatenate([loopArray1, loopArray2])
    endArray = np.arange((Amplitude+step), 0-step, -step)
    Array = np.concatenate([startArray, loopArray, endArray])
    resultList = []
    return Array, resultList
import numpy as np
import pandas as pd
from commons.constants import CANDLE_CLOSE_COLUMN
from commons.debug import print_dataframe
def boll_trend(df):
    """
    Bollinger-band trend signals: go long when the close crosses above the
    upper band, exit long when it crosses below the middle band; go short
    when the close crosses below the lower band, exit short when it crosses
    above the middle band.  Signals fire only on the crossing bar.
    """
    prev = df.shift(1)
    close = df[CANDLE_CLOSE_COLUMN]
    prev_close = prev[CANDLE_CLOSE_COLUMN]
    # long entry: close crosses above the upper band
    open_long = (close > df['BBU']) & (prev_close <= prev['BBU'])
    df['signal_long'] = np.where(open_long, 1, np.NaN)
    # long exit: close crosses below the middle band
    exit_long = (close < df['BBM']) & (prev_close >= prev['BBM'])
    df['signal_long'] = np.where(exit_long, 0, df['signal_long'])
    # short entry: close crosses below the lower band
    open_short = (close < df['BBL']) & (prev_close >= prev['BBL'])
    df['signal_short'] = np.where(open_short, -1, np.NaN)
    # short exit: close crosses above the middle band
    exit_short = (close > df['BBM']) & (prev_close <= prev['BBM'])
    df['signal_short'] = np.where(exit_short, 0, df['signal_short'])
    return df
def boll_trend_with_safe_distance(df, safe_distance_pct):
    """
    Bollinger trend with a safe-distance filter: positions are only kept
    while the close price stays within ``safe_distance_pct`` (fractional)
    of the middle band.

    NOTE(review): the tail of this function was corrupted in the source
    ("distance_pct = np.where(..., np.abs(close - BBM)"); the distance is
    reconstructed as |close - BBM| / BBM and used to flatten signals that
    exceed the safe distance -- confirm against the original strategy.
    """
    # standard Bollinger trend entry/exit signals
    df = boll_trend(df)
    # forward-fill so every bar carries the current position
    df['signal_long'].fillna(method='ffill', inplace=True)
    df['signal_short'].fillna(method='ffill', inplace=True)
    # relative distance between close and the middle band while positioned
    in_position = (df['signal_long'] == 1) | (df['signal_short'] == -1)
    distance_pct = np.where(in_position,
                            np.abs(df[CANDLE_CLOSE_COLUMN] - df['BBM']) / df['BBM'],
                            0.0)
    # flatten any position whose price drifted beyond the safe distance
    too_far = distance_pct > safe_distance_pct
    df.loc[too_far & (df['signal_long'] == 1), 'signal_long'] = 0
    df.loc[too_far & (df['signal_short'] == -1), 'signal_short'] = 0
    return df
"""
"""
import pandas as pd
import numpy as np
from clintk.cat2vec.feature_selection import LassoSelector
from numpy.testing import assert_array_equal
# Shared fixtures: a toy binary design matrix and the matching Lasso
# coefficients (feature2 and feature3 have the largest |coef|).
values = {'feature1': [0, 0, 1, 1, 0],
          'feature2': [0, 1, 1, 0, 1],
          'feature3': [1, 0, 0, 0, 0],
          'feature4': [1, 0, 0, 0, 1]}
coefficients = {'coef': [0, 4.5, -1.2, 0.5],
                'feature_name': ['feature1', 'feature2', 'feature3',
                                 'feature4']}
df = pd.DataFrame(values)
#    feature1  feature2  feature3  feature4
# 0         0         0         1         1
# 1         0         1         0         0
# 2         1         1         0         0
# 3         1         0         0         0
# 4         0         1         0         1
df_coef = pd.DataFrame(coefficients)
#    coef feature_name
# 0   0.0     feature1
# 1   4.5     feature2
# 2  -1.2     feature3
# 3   0.5     feature4
class TestTransformation(object):
    def SetUp(self):
        # NOTE(review): not a pytest/unittest fixture hook (those are
        # setup_method / setUp) -- this is never called automatically;
        # confirm whether it is intentional.
        return self
def test_fit_transform(self):
selector = LassoSelector(n_features=2,
lasso_coefs=df_coef,
feature_col='feature_name',
coef_col='coef')
# selector.fit(df_coef.feature_name, df_coef.coef)
x_res = selector.transform(df)
x_expected = | np.array([[0, 1], [1, 0], [1, 0], [0, 0], [1, 0]]) | numpy.array |
#!/usr/bin/python
# coding=utf-8
"""
@version:
@author: <NAME>
@license: Apache Licence
@contact: <EMAIL>
@site:
@software: PyCharm Community Edition
@file: fea.py
@time: 05/15/17 17:25 PM
"""
import tensorflow as tf
import numpy as np
import sys
# np fea opt
def np_kaldi_fea_delt1(features):
    """First-order (delta) features: a weighted difference over the
    +/-1 and +/-2 frame neighbours, with edge frames replicated by two
    successive 1-frame symmetric pads (matches the original's padding)."""
    num_frames = features.shape[0]
    padded = np.pad(features, [[1, 1], [0, 0]], "symmetric")
    padded = np.pad(padded, [[1, 1], [0, 0]], "symmetric")
    # window of the padded array shifted by `off` frames
    def shifted(off):
        return padded[2 + off: 2 + off + num_frames]
    return (shifted(1) - shifted(-1)) * 0.1 + (shifted(2) - shifted(-2)) * 0.2
def np_kaldi_fea_delt2(features):
    """Second-order (delta-delta) features over a +/-4 frame window, with
    edge frames replicated by four successive 1-frame symmetric pads
    (matches the original's padding) and the original fixed weights."""
    num_frames = features.shape[0]
    padded = features
    for _ in range(4):
        padded = np.pad(padded, [[1, 1], [0, 0]], "symmetric")
    # window of the padded array shifted by `off` frames
    def frame(off):
        return padded[4 + off: 4 + off + num_frames]
    return (- 0.1 * frame(0)
            - 0.04 * (frame(-1) + frame(1))
            + 0.01 * (frame(-2) + frame(2))
            + 0.04 * (frame(-3) + frame(-4) + frame(4) + frame(3)))
# def np_fea_delt(features):
# row, col = np.shape(features)
# l2 = np.pad(features, [[2, 0], [0, 0]], 'constant')[:row]
# l1 = np.pad(features, [[1, 0], [0, 0]], 'constant')[:row]
# r1 = np.pad(features, [[0, 1], [0, 0]], 'constant')[1:row + 1]
# r2 = np.pad(features, [[0, 2], [0, 0]], 'constant')[2:row + 2]
# delt = (r2 - l2) * 0.2 + (r1 - l1) * 0.1
# return delt
def np_fea_add_delt(feature):
    """Append first- and second-order delta features along the feature
    axis, returning ``[features | delta1 | delta2]`` (3x the columns).

    NOTE(review): the final line was corrupted in the source
    ("fb = | np.concatenate(fb, 1) |"); restored as the column-wise
    concatenation together with its return value.
    """
    blocks = [feature]
    delt1 = np_kaldi_fea_delt1(feature)
    blocks.append(delt1)
    delt2 = np_kaldi_fea_delt2(feature)
    blocks.append(delt2)
    return np.concatenate(blocks, 1)
import struct
from datetime import datetime
import numpy as np
from pySDC.helpers.pysdc_helper import FrozenClass
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
class _fault_stats(FrozenClass):
    """Counters for fault injection/detection bookkeeping; frozen so no
    new attributes can be added by accident afterwards."""
    def __init__(self):
        # all counters start at zero; names mirror the events they count
        for name in ('nfaults_called', 'nfaults_injected_u',
                     'nfaults_injected_f', 'nfaults_detected',
                     'ncorrection_attempts', 'nfaults_missed',
                     'nfalse_positives', 'nfalse_positives_in_correction',
                     'nclean_steps'):
            setattr(self, name, 0)
        self._freeze()
class implicit_sweeper_faults(generic_implicit):
"""
LU sweeper using LU decomposition of the Q matrix for the base integrator, special type of generic implicit sweeper
"""
def __init__(self, params):
"""
Initialization routine for the custom sweeper
Args:
params: parameters for the sweeper
"""
if 'allow_fault_correction' not in params:
params['allow_fault_correction'] = False
if 'detector_threshold' not in params:
params['detector_threshold'] = 1.0
if 'dump_injections_filehandle' not in params:
params['dump_injections_filehandle'] = None
# call parent's initialization routine
super(implicit_sweeper_faults, self).__init__(params)
self.fault_stats = _fault_stats()
self.fault_injected = False
self.fault_detected = False
self.in_correction = False
self.fault_iteration = False
def reset_fault_stats(self):
"""
Helper method to reset all fault related stats and flags. Will be called after the run in post-processing.
"""
self.fault_stats = _fault_stats()
self.fault_injected = False
self.fault_detected = False
self.in_correction = False
self.fault_iteration = False
@staticmethod
def bitsToFloat(b):
"""
Static helper method to get a number from bit into float representation
Args:
b: bit representation of a number
Returns:
float representation of b
"""
s = struct.pack('>q', b)
return struct.unpack('>d', s)[0]
@staticmethod
def floatToBits(f):
"""
Static helper method to get a number from float into bit representation
Args:
f: float representation of a number
Returns:
bit representation of f
"""
s = struct.pack('>d', f)
return struct.unpack('>q', s)[0]
def do_bitflip(self, a, pos):
"""
Method to do a bit flip
Args:
a: float representation of a number
pos (int between 0 and 63): position of bit flip
Returns:
float representation of a number after bit flip at pos
"""
# flip of mantissa (fraction) bit (pos between 0 and 51) or of exponent bit (pos between 52 and 62)
if pos < 63:
b = self.floatToBits(a)
# mask: bit representation with 1 at pos and 0 elsewhere
mask = 1 << pos
# ^: bitwise xor-operator --> bit flip at pos
c = b ^ mask
return self.bitsToFloat(c)
# "flip" of sign bit (pos = 63)
elif pos == 63:
return -a
def inject_fault(self, type=None, target=None):
"""
Main method to inject a fault
Args:
type (str): string describing whether u of f should be affected
target: data to be modified
"""
pos = 0
bitflip_entry = 0
# do bitflip in u
if type == 'u':
# do something to target = u here!
# do a bitflip at random vector entry of u at random position in bit representation
ulen = len(target.values)
bitflip_entry = | np.random.randint(ulen) | numpy.random.randint |
"""
Created on Thu Jan 26 17:04:11 2017
@author: <NAME>, <EMAIL>
"""
#%matplotlib inline
import numpy as np
import pandas as pd
import dicom
import os
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
import scipy.ndimage # added for scaling
import cv2
import time
import glob
from skimage import measure, morphology, segmentation
import SimpleITK as sitk
RESIZE_SPACING = [2,2,2] # z, y, x (x & y MUST be the same)
RESOLUTION_STR = "2x2x2"
img_rows = 448
img_cols = 448 # global values
DO_NOT_USE_SEGMENTED = True
#STAGE = "stage1"
STAGE_DIR_BASE = "../input/%s/" # on one cluster we had input_shared
LUNA_MASKS_DIR = "../luna/data/original_lung_masks/"
luna_subset = 0 # initial
LUNA_BASE_DIR = "../luna/data/original_lungs/subset%s/" # added on AWS; data as well
LUNA_DIR = LUNA_BASE_DIR % luna_subset
CSVFILES = "../luna/data/original_lungs/CSVFILES/%s"
LUNA_ANNOTATIONS = CSVFILES % "annotations.csv"
LUNA_CANDIDATES = CSVFILES % "candidates.csv"
# Load the scans in given folder path (loads the most recent acquisition)
def load_scan(path):
    """Load all DICOM slices in *path*, keep only the dominant (or latest)
    acquisition, sort them by z position and stamp each slice with the
    derived SliceThickness.
    """
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key = lambda x: int(x.InstanceNumber))
    # group slices by acquisition number; keep the acquisition with the most
    # slices, breaking ties in favour of the later acquisition
    acquisitions = [x.AcquisitionNumber for x in slices]
    vals, counts = np.unique(acquisitions, return_counts=True)
    vals = vals[::-1]  # reverse order so the later acquisitions are first (the np.uniques seems to always return the ordered 1 2 etc.
    counts = counts[::-1]
    ## take the acquistions that has more entries; if these are identical take the later entrye
    acq_val_sel = vals[np.argmax(counts)]
    ##acquisitions = sorted(np.unique(acquisitions), reverse=True)
    if len(vals) > 1:
        print ("WARNING ##########: MULTIPLE acquisitions & counts, acq_val_sel, path: ", vals, counts, acq_val_sel, path)
        slices2= [x for x in slices if x.AcquisitionNumber == acq_val_sel]
        slices = slices2
    ## ONE path includes 2 acquisitions (2 sets), take the latter acquiisiton only whihch cyupically is better than the first/previous ones.
    ## example of the '../input/stage1/b8bb02d229361a623a4dc57aa0e5c485'
    #slices.sort(key = lambda x: int(x.ImagePositionPatient[2])) # from v 8, BUG should be float
    slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
    # slice thickness from the z spacing of the first two slices; fall back
    # to SliceLocation when ImagePositionPatient is unusable
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices
def get_3d_data_slices(slices): # get data in Hunsfield Units
    """Stack sorted DICOM slices into a (z, y, x) int16 volume converted to
    Hounsfield units via each slice's rescale slope/intercept."""
    slices.sort(key = lambda x: float(x.ImagePositionPatient[2])) # from v 9
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16) # ensure int16 (it may be here uint16 for some images )
    image[image == -2000] = 0 #correcting cyindrical bound entrioes to 0
    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)): # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        # apply the slope in float64 and cast back before adding the
        # intercept, to avoid int16 overflow during the multiply
        if slope != 1: # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16)
def get_pixels_hu(slices):
    """Convert DICOM slices to a (z, y, x) int16 volume in Hounsfield
    units, applying the per-slice rescale slope and intercept."""
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)
    # Set outside-of-scan pixels to 0
    # The intercept is usually -1024, so air is approximately 0
    image[image == -2000] = 0
    # Convert to Hounsfield units (HU)
    ### slope can differ per slice -- so do it individually (case in point black_tset, slices 95 vs 96)
    ### Changes/correction - 31.01.2017
    for slice_number in range(len(slices)):
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        # apply the slope in float64 and cast back before adding the
        # intercept, to avoid int16 overflow during the multiply
        if slope != 1:
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16)
MARKER_INTERNAL_THRESH = -400
MARKER_FRAME_WIDTH = 9 # 9 seems OK for the half special case ...
def generate_markers(image):
    """Build watershed markers for one 2-D CT slice (in HU).

    Returns (marker_internal, marker_external, marker_watershed):
    internal = the two largest below-threshold components (candidate lungs),
    external = a ring between two dilations of the internal marker,
    watershed = integer marker image (255 internal, 128 external, 0 elsewhere).
    """
    # --- internal marker ---
    useTestPlot = False
    if useTestPlot:
        timg = image
        plt.imshow(timg, cmap='gray')
        plt.show()
    add_frame_vertical = True
    if add_frame_vertical:  # add frame for potentially closing the lungs that touch the edge, but only vertically
        fw = MARKER_FRAME_WIDTH  # frame width (2 is the minimum for the first 2 marker operations below)
        xdim = image.shape[1]
        img2 = np.copy(image)
        img2[:, 0] = -1024
        img2[:, 1:fw] = 0
        img2[:, xdim-1:xdim] = -1024
        img2[:, xdim-fw:xdim-1] = 0
        marker_internal = img2 < MARKER_INTERNAL_THRESH
    else:
        marker_internal = image < MARKER_INTERNAL_THRESH  # was -400
    useTestPlot = False
    if useTestPlot:
        timg = marker_internal
        plt.imshow(timg, cmap='gray')
        plt.show()
    correct_edges2 = False  # NOT a good idea - no added value
    if correct_edges2:
        marker_internal[0, :] = 0
        marker_internal[:, 0] = 0
        marker_internal[511, :] = 0
        marker_internal[:, 511] = 0
    marker_internal = segmentation.clear_border(marker_internal, buffer_size=0)
    marker_internal_labels = measure.label(marker_internal)
    # Keep only the two largest connected components (assumed to be the lungs).
    areas = [r.area for r in measure.regionprops(marker_internal_labels)]
    areas.sort()
    if len(areas) > 2:
        for region in measure.regionprops(marker_internal_labels):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    marker_internal_labels[coordinates[0], coordinates[1]] = 0
    marker_internal = marker_internal_labels > 0
    # --- external marker: ring between a near and a far dilation ---
    external_a = ndimage.binary_dilation(marker_internal, iterations=10)  # was 10
    external_b = ndimage.binary_dilation(marker_internal, iterations=55)  # was 55
    marker_external = external_b ^ external_a
    # --- watershed marker matrix ---
    # FIX: dtype=np.int crashes on NumPy >= 1.24 (deprecated alias removed);
    # np.int was simply the builtin int.
    marker_watershed = np.zeros(marker_external.shape, dtype=int)
    marker_watershed += marker_internal * 255
    marker_watershed += marker_external * 128
    return marker_internal, marker_external, marker_watershed
# Some of the starting Code is taken from ArnavJain, since it's more readable than my own
def generate_markers_3d(image):
    """3-D variant of generate_markers(): build watershed markers per slice of a z-stack.

    Returns (marker_internal, marker_external, marker_watershed) with the same
    semantics as generate_markers(), computed slice-wise over axis 0.
    """
    # --- internal marker ---
    marker_internal = image < -400
    marker_internal_labels = np.zeros(image.shape).astype(np.int16)
    for i in range(marker_internal.shape[0]):
        marker_internal[i] = segmentation.clear_border(marker_internal[i])
        marker_internal_labels[i] = measure.label(marker_internal[i])
    # Per slice, keep only the two largest connected components (the lungs).
    # (A stack-wide `areas` list computed here previously was dead code — it was
    # immediately overwritten inside the loop below.)
    for i in range(marker_internal.shape[0]):
        areas = [r.area for r in measure.regionprops(marker_internal_labels[i])]
        areas.sort()
        if len(areas) > 2:
            for region in measure.regionprops(marker_internal_labels[i]):
                if region.area < areas[-2]:
                    for coordinates in region.coords:
                        marker_internal_labels[i, coordinates[0], coordinates[1]] = 0
    marker_internal = marker_internal_labels > 0
    # --- external marker ---
    # 3x3 structuring element with connectivity 1 (the 2-D default), expanded
    # by a z axis of length 1 so the dilation acts slice-wise.
    struct1 = ndimage.generate_binary_structure(2, 1)
    struct1 = struct1[np.newaxis, :, :]
    external_a = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=10)
    external_b = ndimage.binary_dilation(marker_internal, structure=struct1, iterations=55)
    marker_external = external_b ^ external_a
    # --- watershed marker matrix ---
    # FIX: dtype=np.int crashes on NumPy >= 1.24 (deprecated alias removed).
    marker_watershed = np.zeros(marker_external.shape, dtype=int)
    marker_watershed += marker_internal * 255
    marker_watershed += marker_external * 128
    return marker_internal, marker_external, marker_watershed
# Disk radius used for the final binary closing of the lung filter in the
# seperate_lungs* functions.
BINARY_CLOSING_SIZE = 7 #was 7 before final; 5 for disk seems sufficient - for safety let's go with 6 or even 7
def seperate_lungs(image):
    """Watershed-based lung segmentation of a single 2-D slice (reference version).

    Returns (segmented, lungfilter, outline, watershed, sobel_gradient,
    marker_internal, marker_external, marker_watershed); pixels outside the
    lung filter are assigned -2000 HU in `segmented`.
    """
    # Markers as built above.
    marker_internal, marker_external, marker_watershed = generate_markers(image)
    # Sobel gradient magnitude, rescaled to [0, 255].
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    # Watershed algorithm.
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    # Reduce the watershed image to its outline.
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)
    # Black-tophat morphology for re-inclusion; disk kernel, slightly enlarged.
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    # Lung filter = internal marker OR outline.
    lungfilter = np.bitwise_or(marker_internal, outline)
    # Close holes in the lung filter (fill_holes is not used since it would
    # re-include the heart on some slices).
    structure = morphology.disk(BINARY_CLOSING_SIZE)  # 5 seems sufficient, 7 for safety
    lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3)
    # NOTE with the default single iteration, holes remain inside the lungs for disk(5).
    # FIX: use image.shape instead of the hard-coded (512, 512) so non-512
    # slices work — consistent with seperate_lungs_cv2 below.
    segmented = np.where(lungfilter == 1, image, -2000*np.ones(image.shape))  # was -2000
    return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def rescale_n(n, reduce_factor):
    """Scale the integer size `n` down by `reduce_factor`, never returning less than 1."""
    scaled = int(round(n / reduce_factor))
    return scaled if scaled > 1 else 1
def seperate_lungs_cv2(image):  # for increased speed
    """Watershed lung segmentation of one 2-D slice, cv2-accelerated black-hat.

    Same contract as seperate_lungs(); structuring-element sizes are rescaled
    via rescale_n() for non-512 inputs.  Returns the same 8-tuple.
    """
    # Markers as built above.
    marker_internal, marker_external, marker_watershed = generate_markers(image)
    reduce_factor = 512 / image.shape[0]
    # Sobel gradient magnitude, rescaled to [0, 255].
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    useTestPlot = False
    if useTestPlot:
        timg = sobel_gradient
        plt.imshow(timg, cmap='gray')
        plt.show()
    # Watershed algorithm.
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    if useTestPlot:
        timg = marker_external
        plt.imshow(timg, cmap='gray')
        plt.show()
    # Reduce the watershed image to its outline; keep the original (3,3) —
    # a dynamically rescaled size is too small to create an outline.
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)
    outline_u = outline.astype(np.uint8)  # uint8 copy for cv2
    # Black-tophat morphology for re-inclusion; disk kernel, slightly enlarged.
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    use_reduce_factor = True
    if use_reduce_factor:
        # dynamically adjust the number of iterations; original was 8
        blackhat_struct = ndimage.iterate_structure(blackhat_struct, rescale_n(8, reduce_factor))
    else:
        blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    blackhat_struct_cv2 = blackhat_struct.astype(np.uint8)
    # cv2 MORPH_BLACKHAT is much faster than ndimage.black_tophat and was
    # verified to give an identical result.
    # FIX: np.bool was removed in NumPy 1.24 — use the builtin bool.
    outline += (cv2.morphologyEx(outline_u, cv2.MORPH_BLACKHAT, kernel=blackhat_struct_cv2)).astype(bool)
    if useTestPlot:
        timg = outline
        plt.imshow(timg, cmap='gray')
        plt.show()
    # Lung filter = internal marker OR outline.
    lungfilter = np.bitwise_or(marker_internal, outline)
    if useTestPlot:
        timg = lungfilter
        plt.imshow(timg, cmap='gray')
        plt.show()
    # Close holes in the lung filter (fill_holes would re-include the heart on
    # some slices).  The small disk fills gaps/holes close to the border that
    # the large structure would miss; the large disk closes the interior.
    structure2 = morphology.disk(2)
    if use_reduce_factor:
        structure3 = morphology.disk(rescale_n(BINARY_CLOSING_SIZE, reduce_factor))  # dynamically adjusted
    else:
        structure3 = morphology.disk(BINARY_CLOSING_SIZE)
    lungfilter2 = ndimage.morphology.binary_closing(lungfilter, structure=structure2, iterations=3)
    lungfilter3 = ndimage.morphology.binary_closing(lungfilter, structure=structure3, iterations=3)
    lungfilter = np.bitwise_or(lungfilter2, lungfilter3)
    # Apply the lung filter (filtered areas are assigned -2000 HU).
    segmented = np.where(lungfilter == 1, image, -2000*np.ones(image.shape).astype(np.int16))
    return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def seperate_lungs_3d(image):
    """Watershed lung segmentation over a whole 3-D volume at once.

    Same pipeline as seperate_lungs()/seperate_lungs_cv2() but applied to the
    full z-stack: structuring elements get a z axis of length 1 so morphology
    acts slice-wise, and the Sobel gradient is taken over the in-plane axes.
    Returns (segmented, lungfilter, outline, watershed, sobel_gradient,
    marker_internal, marker_external, marker_watershed); pixels outside the
    lung filter are set to -2000 HU.
    NOTE(review): relies on module-level np/ndimage/morphology imports and the
    BINARY_CLOSING_SIZE constant.
    """
    #Creation of the markers as shown above:
    marker_internal, marker_external, marker_watershed = generate_markers_3d(image)
    #Creation of the Sobel-Gradient (in-plane axes only: y and x)
    sobel_filtered_dx = ndimage.sobel(image, axis=2)
    sobel_filtered_dy = ndimage.sobel(image, axis=1)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    #Watershed algorithm
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    #Reducing the image created by the Watershed algorithm to its outline
    # size=(1,3,3): gradient in-plane only, no mixing across slices
    outline = ndimage.morphological_gradient(watershed, size=(1,3,3))
    outline = outline.astype(bool)
    #Performing Black-Tophat Morphology for reinclusion
    #Creation of the disk-kernel and increasing its size a bit
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    # expand by a z axis of 1 so the black-hat acts slice-wise
    blackhat_struct = blackhat_struct[np.newaxis,:,:]
    #Perform the Black-Hat
    outline += ndimage.black_tophat(outline, structure=blackhat_struct) # very long time
    #Use the internal marker and the Outline that was just created to generate the lungfilter
    lungfilter = np.bitwise_or(marker_internal, outline)
    #Close holes in the lungfilter
    #fill_holes is not used here, since in some slices the heart would be reincluded by accident
    ##structure = np.ones((BINARY_CLOSING_SIZE,BINARY_CLOSING_SIZE)) # 5 is not enough, 7 is
    structure = morphology.disk(BINARY_CLOSING_SIZE) # better , 5 seems sufficient, we use 7 for safety/just in case
    # slice-wise closing: disk expanded with a z axis of 1
    structure = structure[np.newaxis,:,:]
    lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3) #, iterations=3) # was structure=np.ones((5,5))
    ### NOTE if no iterattions, i.e. default 1 we get holes within lungs for the disk(5) and perhaps more
    #Apply the lungfilter (note the filtered areas being assigned -2000 HU)
    segmented = np.where(lungfilter == 1, image, -2000*np.ones(marker_internal.shape))
    return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def get_slice_location(dcm):
    """Return the DICOM Slice Location tag (0020,1041) as a float."""
    slice_location = dcm[0x0020, 0x1041].value
    return float(slice_location)
def thru_plane_position(dcm):
    """Gets spatial coordinate of image origin whose axis
    is perpendicular to image plane.
    """
    cosines = [float(c) for c in dcm.ImageOrientationPatient]
    position = [float(p) for p in dcm.ImagePositionPatient]
    # The plane normal is the cross product of the row and column direction cosines.
    plane_normal = np.cross(cosines[:3], cosines[3:])
    return np.dot(position, plane_normal)
def resample(image, scan, new_spacing=[1,1,1]):
    """Resample a 3-D volume to (approximately) `new_spacing` mm per voxel.

    image: 3-D array in (z, y, x) order.
    scan: list of DICOM slices; slice 0 supplies SliceThickness and PixelSpacing.
    new_spacing: requested spacing; the achievable spacing (rounded to an
        integer voxel grid) is returned.
    Returns (resampled_image, actual_new_spacing).
    """
    # Current pixel spacing: (slice thickness, row spacing, column spacing).
    spacing = np.array([float(scan[0].SliceThickness)]
                       + [float(s) for s in scan[0].PixelSpacing])
    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    # Round to an integer output shape and recompute the factor/spacing that
    # this shape actually realises.
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor
    # FIX: scipy.ndimage.zoom — the scipy.ndimage.interpolation namespace is
    # deprecated and removed in recent SciPy releases.
    image = scipy.ndimage.zoom(image, real_resize_factor, mode='nearest')
    return image, new_spacing
def segment_all(stage, part=0, processors=1, showSummaryPlot=True):  # stage added to simplify the stage1 and stage2 calculations
    """Segment every scan folder of a DSB stage and save rescaled/segmented/cropped volumes.

    stage: stage name substituted into STAGE_DIR_BASE (e.g. "stage1").
    part, processors: simple modulo sharding — this process handles folders
        where count % processors == part.
    showSummaryPlot: show a before/after plot per scan.
    Returns (part, processors, count).
    """
    count = 0
    STAGE_DIR = STAGE_DIR_BASE % stage
    folders = glob.glob(''.join([STAGE_DIR, '*']))
    if len(folders) == 0:
        print ("ERROR, check directory, no folders found in: ", STAGE_DIR )
    for folder in folders:
        count += 1
        if count % processors == part:  # do this part in this process, otherwise skip
            path = folder
            slices = load_scan(path)
            image_slices = get_3d_data_slices(slices)
            useTestPlot = False
            if useTestPlot:
                print("Shape before segmenting\t", image_slices.shape)
                plt.hist(image_slices.flatten(), bins=80, color='c')
                plt.xlabel("Hounsfield Units (HU)")
                plt.ylabel("Frequency")
                plt.show()
            start = time.time()
            resampleImages = True
            if resampleImages:
                # start with this small resolution for working out the system
                image_resampled, spacing = resample(image_slices, slices, RESIZE_SPACING)
                print("Shape_before_&_after_resampling\t", image_slices.shape,image_resampled.shape)
                if useTestPlot:
                    plt.imshow(image_slices[image_slices.shape[0]//2], cmap=plt.cm.bone)
                    plt.show()
                    plt.imshow(image_resampled[image_resampled.shape[0]//2], cmap=plt.cm.bone)
                    np.max(image_slices)
                    np.max(image_resampled)
                    np.min(image_slices)
                    np.min(image_resampled)
                    plt.show()
                image_slices = image_resampled
            shape = image_slices.shape
            # Per-slice result buffers.  FIX: np.bool was removed in NumPy 1.24;
            # use the builtin bool.
            l_segmented = np.zeros(shape).astype(np.int16)
            l_lungfilter = np.zeros(shape).astype(bool)
            l_outline = np.zeros(shape).astype(bool)
            l_watershed = np.zeros(shape).astype(np.int16)
            l_sobel_gradient = np.zeros(shape).astype(np.float32)
            l_marker_internal = np.zeros(shape).astype(bool)
            l_marker_external = np.zeros(shape).astype(bool)
            l_marker_watershed = np.zeros(shape).astype(np.int16)
            i = 0
            for i in range(shape[0]):
                l_segmented[i], l_lungfilter[i], l_outline[i], l_watershed[i], l_sobel_gradient[i], l_marker_internal[i], l_marker_external[i], l_marker_watershed[i] = seperate_lungs_cv2(image_slices[i])
            print("Rescale & Seg time, and path: ", ((time.time() - start)), path )
            if useTestPlot:
                plt.hist(image_slices.flatten(), bins=80, color='c')
                plt.xlabel("Hounsfield Units (HU)")
                plt.ylabel("Frequency")
                plt.show()
                plt.hist(l_segmented.flatten(), bins=80, color='c')
                plt.xlabel("Hounsfield Units (HU)")
                plt.ylabel("Frequency")
                plt.show()
                img_sel_i = shape[0] // 2
                # Show some slice in the middle
                plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
                plt.show()
                # Show some slice in the middle
                plt.imshow(l_segmented[img_sel_i], cmap='gray')
                plt.show()
            path_rescaled = path.replace(stage, ''.join([stage, "_", RESOLUTION_STR]), 1)
            path_segmented = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR]), 1)
            path_segmented_crop = path.replace(stage, ''.join([stage, "_segmented_", RESOLUTION_STR, "_crop"]), 1)
            np.savez_compressed(path_rescaled, image_slices)
            np.savez_compressed(path_segmented, l_segmented)
            mask = l_lungfilter.astype(np.int8)
            regions = measure.regionprops(mask)  # this measures the largest region and is a bug when the mask is not the largest region !!!
            bb = regions[0].bbox
            zlen = bb[3] - bb[0]
            ylen = bb[4] - bb[1]
            xlen = bb[5] - bb[2]
            dx = 0  # could be reduced
            ## have to reduce dx as for instance at least one image the lungs stretch right to the border even without cropping
            ## namely for '../input/stage1/be57c648eb683a31e8499e278a89c5a0'
            crop_max_ratio_z = 0.6  # 0.8 is too big make_submit2(45, 1)
            crop_max_ratio_y = 0.4
            crop_max_ratio_x = 0.6
            bxy_min = np.min(bb[1:3])
            bxy_max = np.max(bb[4:6])
            mask_shape = mask.shape
            image_shape = l_segmented.shape
            # FIX: the original computed zlen*ylen*zlen (zlen twice) — xlen was
            # computed but unused; this is the fraction of the volume the
            # bounding box occupies.
            mask_volume = zlen*ylen*xlen /(mask_shape[0] * mask_shape[1] * mask_shape[2])
            mask_volume_thresh = 0.08  # anything below is too small (maybe just one half of the lung or something very small)
            mask_volume_check = mask_volume > mask_volume_thresh
            ### DO NOT allow the mask to touch x & y ---> if it does it is likely a wrong one as for:
            ## folders[3] , path = '../input/stage1/9ba5fbcccfbc9e08edcfe2258ddf7
            maskOK = False
            if bxy_min > 0 and bxy_max < 512 and mask_volume_check and zlen/mask_shape[0] > crop_max_ratio_z and ylen/mask_shape[1] > crop_max_ratio_y and xlen/mask_shape[2] > crop_max_ratio_x:
                ## square crop and at least dx elements on both sides on x & y
                bxy_min = np.min(bb[1:3])
                bxy_max = np.max(bb[4:6])
                if bxy_min == 0 or bxy_max == 512:
                    # Mask too big, auto-correct
                    print("The following mask likely too big, autoreducing by:", dx)
                    bxy_min = np.max((bxy_min, dx))
                    bxy_max = np.min((bxy_max, mask_shape[1] - dx))
                image = l_segmented[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
                mask = mask[bb[0]:bb[3], bxy_min:bxy_max, bxy_min:bxy_max]
                print ("Shape, cropped, bbox ", mask_shape, mask.shape, bb)
            elif bxy_min > 0 and bxy_max < 512 and mask_volume_check and zlen/mask.shape[0] > crop_max_ratio_z:
                ## cut on z at least
                image = l_segmented[bb[0]:bb[3], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
                print("Mask too small, NOT auto-cropping x-y: shape, cropped, bbox, ratios, violume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
            else:
                image = l_segmented[0:mask_shape[0], dx: image_shape[1] - dx, dx: image_shape[2] - dx]
                print("Mask wrong, NOT auto-cropping: shape, cropped, bbox, ratios, volume:", mask_shape, image.shape, bb, path, zlen/mask_shape[0], ylen/mask_shape[1], xlen/mask_shape[2], mask_volume)
            if showSummaryPlot:
                img_sel_i = shape[0] // 2
                # Show some slice in the middle
                useSeparatePlots = False
                if useSeparatePlots:
                    plt.imshow(image_slices[img_sel_i], cmap=plt.cm.gray)
                    plt.show()
                    plt.imshow(l_segmented[img_sel_i], cmap='gray')
                    plt.show()
                else:
                    f, ax = plt.subplots(1, 2, figsize=(6,3))
                    ax[0].imshow(image_slices[img_sel_i],cmap=plt.cm.bone)
                    ax[1].imshow(l_segmented[img_sel_i],cmap=plt.cm.bone)
                    plt.show()
            np.savez_compressed(path_segmented_crop, image)
    return part, processors, count
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This function reads a '.mhd' file using SimpleITK and returns the image array,
origin and spacing of the image.
'''
def load_itk(filename):
    """Read a '.mhd' image with SimpleITK.

    Returns (ct_scan, origin, spacing) with all axes in z,y,x order (SimpleITK
    reports x,y,z, so origin and spacing are reversed here).
    """
    itk_image = sitk.ReadImage(filename)
    ct_scan = sitk.GetArrayFromImage(itk_image)  # already z,y,x
    # Origin/spacing are needed for world <-> voxel coordinate conversion.
    origin = np.array(itk_image.GetOrigin()[::-1])
    spacing = np.array(itk_image.GetSpacing()[::-1])
    return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
    """Convert world (mm) coordinates to voxel indices using the scan origin and spacing."""
    offset_from_origin = np.absolute(world_coordinates - origin)
    return offset_from_origin / spacing
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
    """Convert voxel indices back to world (mm) coordinates using origin and spacing."""
    return voxel_coordinates * spacing + origin
def seq(start, stop, step=1):
    """Arithmetic sequence from start towards stop (inclusive of the rounded end).

    Returns [] when fewer than two steps fit between start and stop.
    """
    n_steps = int(round((stop - start) / float(step)))
    if n_steps <= 1:
        return []
    return [start + step * i for i in range(n_steps + 1)]
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
#image = lung_img
#spacing = new_spacing
def draw_circles(image,cands,origin,spacing):
    """Rasterise spherical nodule masks into a binary (int16) volume.

    image: 3-D array (z,y,x) giving the output shape.
    cands: DataFrame of nodule annotations; rows are indexed positionally —
        assumes columns (seriesuid, coordX, coordY, coordZ, diameter_mm),
        i.e. the LUNA16 annotations format — TODO confirm against caller.
    origin, spacing: scan geometry in z,y,x order (see load_itk).
    Returns the mask volume with 1 inside each (slightly enlarged) nodule sphere.
    """
    #make empty matrix, which will be filled with the mask
    image_mask = np.zeros(image.shape, dtype=np.int16)
    #run over all the nodules in the lungs
    for ca in cands.values:
        #get middel x-,y-, and z-worldcoordinate of the nodule
        #radius = np.ceil(ca[4])/2 ## original: replaced the ceil with a very minor increase of 1% ....
        # radius = half diameter plus ~half the z spacing, to capture a wider
        # border region for learning
        radius = (ca[4])/2 + 0.51 * spacing[0]  # increasing by circa half of distance in z direction .... (trying to capture wider region/border for learning ... and adress the rough net .
        coord_x = ca[1]
        coord_y = ca[2]
        coord_z = ca[3]
        image_coord = np.array((coord_z,coord_y,coord_x))
        #determine voxel coordinate given the worldcoordinate
        image_coord = world_2_voxel(image_coord,origin,spacing)
        #determine the range of the nodule (world-space offsets per axis)
        #noduleRange = seq(-radius, radius, RESIZE_SPACING[0]) # original, uniform spacing
        noduleRange_z = seq(-radius, radius, spacing[0])
        noduleRange_y = seq(-radius, radius, spacing[1])
        noduleRange_x = seq(-radius, radius, spacing[2])
        #create the mask: mark every voxel whose world-space (mm) distance from
        #the nodule centre is below the radius
        for x in noduleRange_x:
            for y in noduleRange_y:
                for z in noduleRange_z:
                    coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
                    #if (np.linalg.norm(image_coord-coords) * RESIZE_SPACING[0]) < radius: ### original (contrained to a uniofrm RESIZE)
                    if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
                        image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
    return image_mask
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to 1mm size in all directions. It saves them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
def load_scans_masks(luna_subset, useAll, use_unsegmented=True):
    """Load preprocessed LUNA16 lung volumes and their nodule masks for one subset.

    luna_subset: glob fragment selecting subset directories (e.g. "[0-6]").
    useAll: when True, also load series that have no annotated nodules.
    use_unsegmented: load the raw rescaled lungs ("lungs_2x2x2") instead of the
        segmented variant ("segmented_2x2x2").
    Returns (scans, masks, sids): lists of 3-D arrays plus their series uids.
    """
    LUNA_DIR = LUNA_BASE_DIR % luna_subset
    files = glob.glob(''.join([LUNA_DIR, '*.mhd']))
    annotations = pd.read_csv(LUNA_ANNOTATIONS)
    # (a dead `annotations.head()` no-op statement was removed here)
    sids = []
    scans = []
    masks = []
    cnt = 0
    skipped = 0
    for file in files:
        imagePath = file
        seriesuid = file[file.rindex('/')+1:]  # everything after the last slash
        seriesuid = seriesuid[:len(seriesuid)-len(".mhd")]  # cut out the suffix to get the uid
        path = imagePath[:len(imagePath)-len(".mhd")]
        if use_unsegmented:
            path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
        else:
            path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
        # select the annotations for the current series
        cands = annotations[seriesuid == annotations.seriesuid]
        if (len(cands) > 0 or useAll):
            sids.append(seriesuid)
            if use_unsegmented:
                scan_z = np.load(path_segmented + '_lung' + '.npz')
            else:
                scan_z = np.load(path_segmented + '_lung_seg' + '.npz')
            scan = scan_z['arr_0']
            mask_z = np.load(path_segmented + '_nodule_mask' + '.npz')
            mask = mask_z['arr_0']
            scans.append(scan)
            masks.append(mask)
            cnt += 1
        else:
            print("Skipping non-nodules entry ", seriesuid)
            skipped += 1
    print ("Summary: cnt & skipped: ", cnt, skipped)
    return scans, masks, sids
def load_scans_masks_or_blanks(luna_subset, useAll, use_unsegmented=True):
    """Load LUNA16 lung volumes with nodule masks, including "blank" series.

    A series is a blank (blankid 1) when it has no annotations, no true
    candidates, and at least one false candidate; otherwise blankid is 0.
    Blank series use the '_nodule_mask_wblanks' mask files.
    Returns (scans, masks, sids, blankids).
    NOTE(review): relies on module-level LUNA_BASE_DIR, LUNA_ANNOTATIONS,
    LUNA_CANDIDATES, glob, pd, np, plt.
    """
    #luna_subset = "[0-6]"
    LUNA_DIR = LUNA_BASE_DIR % luna_subset
    files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
    annotations = pd.read_csv(LUNA_ANNOTATIONS)
    annotations.head()
    candidates = pd.read_csv(LUNA_CANDIDATES)
    candidates_false = candidates[candidates["class"] == 0]  # only select the false candidates
    candidates_true = candidates[candidates["class"] == 1]  # only select the true candidates
    sids = []
    scans = []
    masks = []
    blankids = []   # class/id whether scan is with nodule or without, 0 - with, 1 - without
    cnt = 0
    skipped = 0
    #file=files[7]
    for file in files:
        imagePath = file
        seriesuid =  file[file.rindex('/')+1:]  # everything after the last slash
        seriesuid = seriesuid[:len(seriesuid)-len(".mhd")]  # cut out the suffix to get the uid
        path = imagePath[:len(imagePath)-len(".mhd")]  # cut out the suffix to get the uid
        if use_unsegmented:
            path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
        else:
            path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
        cands = annotations[seriesuid == annotations.seriesuid]  # select the annotations for the current series
        ctrue = candidates_true[seriesuid == candidates_true.seriesuid]
        cfalse = candidates_false[seriesuid == candidates_false.seriesuid]
        #useAll = True
        blankid = 1 if (len(cands) == 0 and len(ctrue) == 0 and len(cfalse) > 0) else 0
        # Manual switches used to generate extra data subsets; both disabled in
        # normal operation.
        skip_nodules_entirely = False  # was False
        use_only_nodules = False  # was True
        if skip_nodules_entirely and blankid ==0:
            ## manual switch to generate extra data for the corrupted set
            print("Skipping nodules (skip_nodules_entirely) ", seriesuid)
            skipped += 1
        elif use_only_nodules and (len(cands) == 0):
            ## manual switch to generate only nodules data due lack of time and repeat etc time pressures
            print("Skipping blanks (use_only_nodules) ", seriesuid)
            skipped += 1
        else:  # NORMAL operations
            if (len(cands) > 0 or
                (blankid >0) or
                useAll):
                sids.append(seriesuid)
                blankids.append(blankid)
                if use_unsegmented:
                    scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
                else:
                    scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
                #scan_z.keys()
                scan = scan_z['arr_0']
                #mask_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
                # '_wblanks' masks also carry the artificial blank markers
                mask_z = np.load(''.join((path_segmented + '_nodule_mask_wblanks' + '.npz')))
                mask = mask_z['arr_0']
                testPlot = False
                if testPlot:
                    maskcheck_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
                    maskcheck = maskcheck_z['arr_0']
                    f, ax = plt.subplots(1, 2, figsize=(10,5))
                    ax[0].imshow(np.sum(np.abs(maskcheck), axis=0),cmap=plt.cm.gray)
                    ax[1].imshow(np.sum(np.abs(mask), axis=0),cmap=plt.cm.gray)
                    #ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
                    plt.show()
                scans.append(scan)
                masks.append(mask)
                cnt += 1
            else:
                print("Skipping non-nodules and non-blank entry ", seriesuid)
                skipped += 1
    print ("Summary: cnt & skipped: ", cnt, skipped)
    return scans, masks, sids, blankids
    #return scans, masks, sids   # not yet, old style
def load_scans_masks_no_nodules(luna_subset, use_unsegmented=True):  # load only the ones that do not contain nodules
    """Load only the LUNA16 series of a subset that carry no nodule annotations."""
    LUNA_DIR = LUNA_BASE_DIR % luna_subset
    mhd_files = glob.glob(''.join([LUNA_DIR, '*.mhd']))
    annotations = pd.read_csv(LUNA_ANNOTATIONS)
    annotations.head()
    sids = []
    scans = []
    masks = []
    cnt = 0
    skipped = 0
    for mhd_file in mhd_files:
        uid = mhd_file[mhd_file.rindex('/') + 1:]   # drop directories
        uid = uid[:len(uid) - len(".mhd")]          # drop the extension -> series uid
        base_path = mhd_file[:len(mhd_file) - len(".mhd")]
        subdir = "lungs_2x2x2" if use_unsegmented else "segmented_2x2x2"
        path_segmented = base_path.replace("original_lungs", subdir, 1)
        # any annotation row for this series means it contains nodules -> skip
        series_annotations = annotations[uid == annotations.seriesuid]
        if len(series_annotations):
            print("Skipping entry with nodules ", uid)
            skipped += 1
            continue
        sids.append(uid)
        lung_suffix = '_lung' if use_unsegmented else '_lung_seg'
        scan_npz = np.load(path_segmented + lung_suffix + '.npz')
        scans.append(scan_npz['arr_0'])
        mask_npz = np.load(path_segmented + '_nodule_mask' + '.npz')
        masks.append(mask_npz['arr_0'])
        cnt += 1
    print ("Summary: cnt & skipped: ", cnt, skipped)
    return scans, masks, sids
MIN_BOUND = -1000.0  # HU floor: values at/below map to 0 (air)
MAX_BOUND = 400.0    # HU ceiling: values at/above map to 1 (bone)
def normalize(image):
    """Linearly rescale HU values so [MIN_BOUND, MAX_BOUND] maps to [0, 1], clipping outside."""
    scaled = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
    scaled[scaled > 1] = 1.
    scaled[scaled < 0] = 0.
    return scaled
# Mean normalized pixel value for LUNA subset 0 with this preprocessing;
# nodule-only slices gave 0.028, all slices 0.020421744071562546 (the
# tutorial used 0.25).
PIXEL_MEAN = 0.028
def zero_center(image):
    """Shift normalized pixel values so the dataset mean is approximately zero."""
    return image - PIXEL_MEAN
def load_scans(path):  # function used for testing
    """Read every DICOM file in `path`, ordered by InstanceNumber, and stack the pixel data."""
    dicoms = sorted((dicom.read_file(path + '/' + name) for name in os.listdir(path)),
                    key=lambda d: int(d.InstanceNumber))
    return np.stack([d.pixel_array for d in dicoms])
def get_scans(df, scans_list):
    """Load the selected slices, preprocess them and display them.

    scans_list holds (row-index, slice-index) pairs into df/scan folders.
    """
    selected = [load_scans(scan_folder + df.id[idx[0]])[idx[1]] for idx in scans_list]
    scans = process_scans(np.stack(selected))
    view_scans(scans)
    return scans
def process_scans(scans):  # used for testing
    """Rescale each slice to uint8 [0, 255], resize to (img_rows, img_cols), return (N, 1, H, W)."""
    processed = np.zeros((scans.shape[0], 1, img_rows, img_cols))
    for idx in range(scans.shape[0]):
        slice_img = scans[idx, :, :]
        slice_img = (255.0 / np.amax(slice_img) * slice_img).astype(np.uint8)
        processed[idx, 0, :, :] = cv2.resize(slice_img, (img_rows, img_cols))
    return processed
only_with_nudels = True  # module-level default: train only on slices that contain nodule pixels

def convert_scans_and_masks(scans, masks, only_with_nudels):
    """Flatten per-series scan/mask stacks into 4-D training arrays.

    scans, masks: lists of 3-D arrays (z, y, x), one per series.
    only_with_nudels: when True, drop slices whose mask has no positive pixels
        (circa 5% of slices contain nodules).
    Returns (scans1, masks1), both float64 of shape (N, 1, H, W); scans are
    normalize()d and zero_center()ed, mask values are passed through unchanged.
    """
    # Skip the first and last slice of every series (edge slices are unreliable).
    scans1 = np.stack([sl for stack in scans for sl in stack[1:-1]])
    masks1 = np.stack([sl for stack in masks for sl in stack[1:-1]])
    if only_with_nudels:
        nudels_pix_count = np.sum(masks1, axis=(1, 2))
        keep = nudels_pix_count > 0
        scans1 = scans1[keep]
        masks1 = masks1[keep]
    scans1 = zero_center(normalize(scans1))
    # Insert the channel axis: (N, H, W) -> (N, 1, H, W).  astype(np.float64)
    # matches the dtype of the np.zeros buffers the original copy loops
    # produced; a cv2.resize could be inserted here if resizing is ever needed.
    return (scans1[:, np.newaxis, :, :].astype(np.float64),
            masks1[:, np.newaxis, :, :].astype(np.float64))
#scans = [scans[i]]
#masks = [masks[i]]
def convert_scans_and_masks_xd_ablanks(scans, masks, blankids, only_with_nudels, dim=3):
    """Stack slices into dim-deep chunks with 3-D masks, seeding blank markers.

    For every series the valid centre slices are expanded into chunks of `dim`
    consecutive slices.  Series whose mask sum is negative are treated as
    "blanks" and receive artificial -1 marker pixels on a coarse grid every
    `add_blank_spacing_size` layers so they survive the nodule-only filter;
    the markers are zeroed out again before returning.

    Args:
        scans, masks: lists of 3-D arrays (slices, rows, cols), one per series.
        blankids: kept for interface compatibility; not used by this version
            (the marker seeding detects blanks from the mask values instead).
        only_with_nudels: keep only chunks whose centre mask layer is non-empty.
        dim: chunk depth; should be odd so the centre layer is well defined.

    Returns:
        (scans1, masks1): stacked, normalized, zero-centered chunk tensors.

    NOTE(review): relies on module-level img_rows/img_cols and the
    normalize()/zero_center() helpers defined elsewhere in this file.
    """
    # reuse scan to reduce memory footprint
    dim_orig = dim
    add_blank_spacing_size = dim * 8  #### use 4 for [0 - 3] and 8 for [4 - 7] (initial trial; should perhaps be just dim)
    skip_low = dim // 2  # dim should be uneven -- it is recalculated anyway to this end
    skip_high = dim - skip_low - 1
    do_not_allow_even_dim = False  ## now we allow odd numbers ...
    if do_not_allow_even_dim:
        dim = 2 * skip_low + 1
        skip_low = dim // 2
        skip_high = dim - skip_low - 1
        if dim != dim_orig:
            print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
    # build dim-deep chunks for every valid centre slice of every scan
    work = []
    for scan in scans:
        tmp = []
        for i in range(skip_low, scan.shape[0] - skip_high):
            rgb = np.stack(scan[i - skip_low:i + skip_high + 1])
            tmp.append(rgb)
        work.append(np.array(tmp))
    scans1 = np.stack([val for sublist in work for val in sublist])  # no extra skipping: edge layers already cut
    work = []
    # Seed artificial -1 marker pixels into blank masks on a coarse interior grid.
    blanks_per_axis = 4  # markers per axis, skipping the border
    crop = 16
    dx = (img_cols - 2 * crop) // (blanks_per_axis + 2)
    dy = (img_rows - 2 * crop) // (blanks_per_axis + 2)
    for mask in masks:
        if (np.sum(mask) < 0):
            # blank series: stamp markers every add_blank_spacing_size layers
            for i in range(skip_low, mask.shape[0] - skip_high, add_blank_spacing_size):
                for ix in range(blanks_per_axis):
                    xpos = crop + (ix + 1) * dx + dx // 2
                    for iy in range(blanks_per_axis):
                        ypos = crop + (iy + 1) * dy + dy // 2
                        # BUGFIX: was mask[skip_low, ypos, xpos], which stamped
                        # the same layer on every pass of the i-loop; stamp
                        # layer i as intended (matches convert_scans_and_masks_xd3).
                        mask[i, ypos, xpos] = -1  # negative pixel, picked up below and corrected back to none
    use_3d_mask = True
    if use_3d_mask:
        # build matching dim-deep mask chunks
        work = []
        for mask in masks:
            tmp = []
            for i in range(skip_low, mask.shape[0] - skip_high):
                rgb = np.stack(mask[i - skip_low:i + skip_high + 1])
                tmp.append(rgb)
            work.append(np.array(tmp))
        masks1 = np.stack([val for sublist in work for val in sublist])
    else:
        masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]])
    if only_with_nudels:
        if use_3d_mask:
            # the centre layer decides whether a chunk counts as a nodule chunk
            nudels_pix_count = np.sum(masks1[:, skip_low], axis=(1, 2))
        else:
            nudels_pix_count = np.sum(masks1, axis=(1, 2))
        scans1 = scans1[nudels_pix_count != 0]
        masks1 = masks1[nudels_pix_count != 0]
    masks1[masks1 < 0] = 0  # clear the artificial blank markers
    scans1 = normalize(scans1)
    useTestPlot = False  # debug-only visual sanity check
    if useTestPlot:
        plt.hist(scans1.flatten(), bins=80, color='c')
        plt.xlabel("Hounsfield Units (HU)")
        plt.ylabel("Frequency")
        plt.show()
        for i in range(20):
            print ('scan '+str(i))
            f, ax = plt.subplots(1, 3, figsize=(15,5))
            ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
            ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
            ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
            plt.show()
    scans1 = zero_center(scans1)
    if use_3d_mask:
        pass  # masks already in chunk form, nothing to reshape
    else:
        masks = np.copy(masks1)
        masks1 = np.zeros((masks.shape[0], 1, img_rows, img_cols))
        for i in range(masks.shape[0]):
            masks1[i, 0, :, :] = masks[i, :, :]
    return scans1, masks1
#scans = [scans[j]]
#masks = [masks[j]]
def convert_scans_and_masks_xd3(scans, masks, only_with_nudels, dim=3, crop=16, blanks_per_axis = 4, add_blank_spacing_size=0, add_blank_layers = 0):
    """Stack per-series slices into dim-deep chunks plus matching 3-D masks.

    NOTE(review): this name is redefined later in this file; the later
    definition is the one in effect at runtime.

    Args:
        scans, masks: lists of 3-D arrays (slices, rows, cols), one per series;
            blank (nodule-free) series are marked with negative mask values.
        only_with_nudels: drop chunks whose mask has no non-zero pixel at all.
        dim: chunk depth; should be odd.
        crop: border (in pixels) excluded when placing random blank markers.
        blanks_per_axis: grid density used only by the disabled
            add_random_blanks_in_blanks branch.
        add_blank_spacing_size: if > 0, stamp a random -1 marker pixel every
            this many layers in blank masks.
        add_blank_layers: if > 0, stamp that many random -1 marker layers.

    Returns:
        (scans1, masks1): stacked, normalized, zero-centered float32 chunks;
        the -1 markers are intentionally NOT cleared here (grid version).
    """
    # reuse scan to reduce memory footprint
    dim_orig = dim
    skip_low = dim // 2 # dim should be uneven -- it is recalculated anyway to this end
    skip_high = dim -skip_low - 1
    do_not_allow_even_dim = False ## now we allow odd numbers ...
    if do_not_allow_even_dim:
        dim = 2 * skip_low + 1
        skip_low = dim // 2
        skip_high = dim -skip_low - 1
        if dim != dim_orig:
            print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
    # build dim-deep chunks for every valid centre slice of every scan
    work = []
    for scan in scans:
        tmp = []
        for i in range(skip_low, scan.shape[0]-skip_high):
            rgb = np.stack(scan[i-skip_low:i+skip_high+1])
            tmp.append(rgb)
        work.append(np.array(tmp))
    scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
    work = []
    # interior extent available for random marker placement
    dxrange = scans[0].shape[-1] - 2 * crop
    dyrange = scans[0].shape[-2] - 2 * crop
    # stamp artificial -1 marker pixels into blank masks at a regular z-spacing
    if add_blank_spacing_size > 0:
        for mask in masks:
            if (np.min(mask) < 0):
                ## we have a blank
                for i in range(skip_low+(add_blank_spacing_size//2), mask.shape[0]-skip_high, add_blank_spacing_size):
                    mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
    # stamp a fixed number of random marker layers into blank masks
    if add_blank_layers > 0:
        for mask in masks:
            if (np.min(mask) < 0):
                dzrange = mask.shape[0]-dim
                ## we have a blank
                for k in range(add_blank_layers):
                    i = np.random.randint(0, dzrange) + skip_low
                    mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
    add_random_blanks_in_blanks = False ## NO need for the extra random blank pixels now, 20170327
    if add_random_blanks_in_blanks:
        for mask in masks:
            if (np.min(mask) < 0):
                ## we have a blank: scatter random markers on every blank layer
                pix_sum = np.sum(mask, axis=(1,2))
                idx_blanks = np.min(mask, axis=(1,2)) < 0 ## don't use it - let's vary the position across the space
                for iz in range(mask.shape[0]):
                    if (np.min(mask[iz])) < 0:
                        for ix in range(blanks_per_axis):
                            for iy in range(blanks_per_axis):
                                xpos = crop + np.random.randint(0,dxrange)
                                ypos = crop + np.random.randint(0,dyrange)
                                mask[iz, ypos, xpos] = -1
    use_3d_mask = True ##
    if use_3d_mask:
        # build matching dim-deep mask chunks
        work = []
        for mask in masks:
            tmp = []
            for i in range(skip_low, mask.shape[0]-skip_high):
                rgb = np.stack(mask[i-skip_low:i+skip_high+1])
                tmp.append(rgb)
            work.append(np.array(tmp))
        masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
    else:
        masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
    if only_with_nudels:
        if use_3d_mask:
            # ANY non-zero pixel anywhere in the chunk counts (abs also catches -1 markers)
            nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2,3))
        else:
            nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2))
        scans1 = scans1[nudels_pix_count != 0]
        masks1 = masks1[nudels_pix_count != 0]
        # NOTE: in this GRID version the -1 markers are deliberately NOT zeroed
        # here; that happens downstream in the "key" version.
    scans1 = normalize(scans1)
    ### after this scans1 becomes float64 ....
    useTestPlot = False  # debug-only visual sanity check
    if useTestPlot:
        plt.hist(scans1.flatten(), bins=80, color='c')
        plt.xlabel("Hounsfield Units (HU)")
        plt.ylabel("Frequency")
        plt.show()
        for i in range(20):
            print ('scan '+str(i))
            f, ax = plt.subplots(1, 3, figsize=(15,5))
            ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
            ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
            ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
            plt.show()
    scans1 = zero_center(scans1)
    scans1 = scans1.astype(np.float32) # make it float 32 (no point carrying 64; keras operates on float32, and originals were int)
    if use_3d_mask:
        done = 1 # nothing to do
    else:
        # reshape 2-D masks to channel-first (N, 1, rows, cols)
        masks = np.copy(masks1)
        masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
        for i in range(masks.shape[0]):
            img=masks[i,:,:]
            ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
            masks1[i,0,:,:]=img
    return scans1, masks1
def convert_scans_and_masks_3d(scans, masks, only_with_nudels):
    """Stack slices into 3-channel (prev, current, next) chunks with 2-D masks.

    Fixed-depth (3) predecessor of convert_scans_and_masks_xd3.  The mask for
    each chunk is the single centre slice (use_3d_mask is hard-wired False).

    Args:
        scans, masks: lists of 3-D arrays (slices, rows, cols), one per series.
        only_with_nudels: keep only chunks whose mask has nodule pixels.

    Returns:
        (scans1, masks1): normalized, zero-centered chunk tensors; masks are
        reshaped to (N, 1, img_rows, img_cols) using the module-level globals.
    """
    # reuse scan to reduce memory footprint
    work = [] # 3 layers
    for scan in scans:
        tmp = []
        for i in range(1, scan.shape[0]-1):
            img1 = scan[i-1]
            img2 = scan[i]
            img3 = scan[i+1]
            rgb = np.stack((img1, img2, img3))
            tmp.append(rgb)
        work.append(np.array(tmp))
    scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
    work = []
    use_3d_mask = False  # hard-wired: masks stay 2-D (centre slice only)
    if use_3d_mask:
        work = [] # 3 layers
        for mask in masks:
            tmp = []
            for i in range(1, mask.shape[0]-1, 3): # SKIP EVERY 3
                img1 = mask[i-1]
                img2 = mask[i]
                img3 = mask[i+1]
                rgb = np.stack((img1, img2, img3))
                tmp.append(rgb)
            work.append(np.array(tmp))
        masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
    else:
        masks1 = np.stack([val for sublist in masks for val in sublist[1:-1]] ) # skip one element at the beginning and at the end
    if only_with_nudels:
        if use_3d_mask:
            nudels_pix_count = np.sum(masks1, axis = (1,2,3))
        else:
            nudels_pix_count = np.sum(masks1, axis = (1,2))
        scans1 = scans1[nudels_pix_count>0]
        masks1 = masks1[nudels_pix_count>0] # circa 5 % with nodules; 232 if we skip over every 3 layers and use a 3d mask
    scans1 = normalize(scans1)
    useTestPlot = False  # debug-only visual sanity check
    if useTestPlot:
        plt.hist(scans1.flatten(), bins=80, color='c')
        plt.xlabel("Hounsfield Units (HU)")
        plt.ylabel("Frequency")
        plt.show()
        for i in range(20):
            print ('scan '+str(i))
            f, ax = plt.subplots(1, 3, figsize=(15,5))
            ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
            ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
            ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
            plt.show()
    scans1 = zero_center(scans1)
    if use_3d_mask:
        done = 1 # nothing to do
    else:
        # reshape 2-D masks to channel-first (N, 1, rows, cols)
        masks = np.copy(masks1)
        masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
        for i in range(masks.shape[0]):
            img=masks[i,:,:]
            ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
            masks1[i,0,:,:]=img
    return scans1, masks1
def view_scans(scans):
    """Show every slice of a channel-first (N, 1, H, W) stack, one figure each."""
    for idx in range(scans.shape[0]):
        print ('scan ' + str(idx))
        plt.imshow(scans[idx, 0, :, :], cmap=plt.cm.gray)
        plt.show()
def view_scans_widget(scans):
    """Show every slice of a (N, 1, H, W) stack in its own 7x7-inch figure."""
    for idx in range(scans.shape[0]):
        plt.figure(figsize=(7, 7))
        plt.imshow(scans[idx, 0, :, :], cmap=plt.cm.gray)
        plt.show()
def get_masks(scans,masks_list):
    """Build square binary masks from hand-marked boxes and preview them.

    Each entry of masks_list is [slice_index, x_centre, y_centre, half_width];
    the mask square is filled with 1s and a white outline is drawn onto a copy
    of the scans for visual inspection.  Relies on module-level img_rows /
    img_cols and matplotlib (plt).

    Returns:
        masks: (N, 1, img_rows, img_cols) array of 0/1 masks.
    """
    scans1=scans.copy()
    maxv=255  # outline brightness
    masks=np.zeros(shape=(scans.shape[0],1,img_rows,img_cols))
    for i_m in range(len(masks_list)):
        # fill the (2*half_width+1)^2 square of the mask with 1s
        for i in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
            for j in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
                masks[masks_list[i_m][0],0,masks_list[i_m][2]+i,masks_list[i_m][1]+j]=1
        # draw the 4 edges of the square on the preview copy
        # (chained assignment also re-sets maxv to 255 each time -- harmless)
        for i1 in range(-masks_list[i_m][3],masks_list[i_m][3]+1):
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]+masks_list[i_m][3]]=maxv=255
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+i1,masks_list[i_m][1]-masks_list[i_m][3]]=maxv=255
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]+masks_list[i_m][3],masks_list[i_m][1]+i1]=maxv=255
            scans1[masks_list[i_m][0],0,masks_list[i_m][2]-masks_list[i_m][3],masks_list[i_m][1]+i1]=maxv=255
    for i in range(scans.shape[0]):
        print ('scan '+str(i))
        f, ax = plt.subplots(1, 2,figsize=(10,5))
        ax[0].imshow(scans1[i,0,:,:],cmap=plt.cm.gray)
        ax[1].imshow(masks[i,0,:,:],cmap=plt.cm.gray)
        plt.show()
    return(masks)
def augmentation(scans,masks,n):
    """Append n+1 randomly augmented batches to the scans and masks stacks.

    Uses a keras ImageDataGenerator with rotation/shift/flip.  The SAME seed
    (1000) is used for both flows so scans and masks receive identical
    geometric transforms.  Note the loop breaks on i > n, so n+1 augmented
    batches are appended after the originals.

    Returns:
        (scans_g, masks_g): originals with augmented samples stacked below.
    """
    datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=25,       # was 25
        width_shift_range=0.3,   # was 0.3; was 0.1; tried 0.01
        height_shift_range=0.3,  # was 0.3; was 0.1; tried 0.01
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=False)
    i=0
    scans_g=scans.copy()
    for batch in datagen.flow(scans, batch_size=1, seed=1000):
        scans_g=np.vstack([scans_g,batch])
        i += 1
        if i > n:
            break
    i=0
    masks_g=masks.copy()
    for batch in datagen.flow(masks, batch_size=1, seed=1000):
        masks_g=np.vstack([masks_g,batch])
        i += 1
        if i > n:
            break
    return((scans_g,masks_g))
def hu_to_pix (hu, min_bound=None, max_bound=None, pixel_mean=None):
    """Map Hounsfield units to the normalized, zero-centered pixel scale.

    Inverse of pix_to_hu.  The window parameters default to the module-level
    MIN_BOUND / MAX_BOUND / PIXEL_MEAN constants (backward compatible); pass
    them explicitly to use a different window.
    """
    lo = MIN_BOUND if min_bound is None else min_bound
    hi = MAX_BOUND if max_bound is None else max_bound
    mean = PIXEL_MEAN if pixel_mean is None else pixel_mean
    return (hu - lo) / (hi - lo) - mean
def pix_to_hu (pix, min_bound=None, max_bound=None, pixel_mean=None):
    """Map normalized, zero-centered pixel values back to Hounsfield units.

    Inverse of hu_to_pix.  The window parameters default to the module-level
    MIN_BOUND / MAX_BOUND / PIXEL_MEAN constants (backward compatible); pass
    them explicitly to use a different window.
    """
    lo = MIN_BOUND if min_bound is None else min_bound
    hi = MAX_BOUND if max_bound is None else max_bound
    mean = PIXEL_MEAN if pixel_mean is None else pixel_mean
    return (pix + mean) * (hi - lo) + lo
from scipy import stats
def eliminate_incorrectly_segmented(scans, masks):
    """Drop samples whose masked scan region looks like pure air (bad segmentation).

    NOTE(review): this name is redefined later in this file; the later,
    nearly identical definition is the one in effect at runtime.  Relies on
    module-level dim, MIN_BOUND, MAX_BOUND, PIXEL_MEAN, plt and scipy stats.

    A non-blank sample is eliminated when the maximum of scan*mask is at or
    below a near-air threshold (about -900 HU on the normalized scale).
    Expects 5-D inputs (N, dim, 1, H, W) -- see the axis=(1,2,3,4) reductions.

    Returns:
        (scans, masks) with the eliminated entries removed.
    """
    skip = dim // 2 # To Change see below ...
    sxm = scans * masks
    near_air_thresh = (-900 - MIN_BOUND) / (MAX_BOUND - MIN_BOUND) - PIXEL_MEAN # version 3 # -750 gives one more (for 0_3, d4; -600 gives 15 more than -900)
    near_air_thresh #0.08628 for -840 # 0.067 for -867; 0.1148 for -800  (no-op expression statement, kept for reference)
    cnt = 0
    # diagnostic pass: report and plot each entry that would be eliminated
    for i in range(sxm.shape[0]):
        sx = sxm[i]
        mx = masks[i]
        if np.sum(mx) > 0: # only check non-blanks ...(keep blanks)
            sx_max = np.max(sx)
            if (sx_max) <= near_air_thresh:
                cnt += 1
                print ("Entry, count # and max: ", i, cnt, sx_max)
                print (stats.describe(sx, axis=None))
                plt.imshow(sx[0,skip], cmap='gray') # selecting the mid entry
                plt.show()
    s_eliminate = np.max(sxm, axis=(1,2,3,4)) <= near_air_thresh # 3d
    s_preserve = np.max(sxm, axis=(1,2,3,4)) > near_air_thresh #3d
    s_eliminate_sum = sum(s_eliminate)
    s_preserve_sum = sum(s_preserve)
    print ("Eliminate, preserve =", s_eliminate_sum, s_preserve_sum)
    masks = masks[s_preserve]
    scans = scans[s_preserve]
    del(sxm)  # free the large product array promptly
    return scans, masks
# the following 3 functions to read LUNA files are from: https://www.kaggle.com/arnavkj95/data-science-bowl-2017/candidate-generation-and-luna16-preprocessing/notebook
'''
This function reads a '.mhd' file using SimpleITK and returns the image array,
origin and spacing of the image.
'''
def load_itk(filename):
    """Read a '.mhd' scan with SimpleITK.

    Returns (ct_scan, origin, spacing), each with axes ordered z, y, x
    (SimpleITK reports origin/spacing as x, y, z, so they are reversed).
    """
    itkimage = sitk.ReadImage(filename)
    # GetArrayFromImage already yields the voxel data in z, y, x order
    ct_scan = sitk.GetArrayFromImage(itkimage)
    origin = np.array(itkimage.GetOrigin()[::-1])
    spacing = np.array(itkimage.GetSpacing()[::-1])
    return ct_scan, origin, spacing
'''
This function is used to convert the world coordinates to voxel coordinates using
the origin and spacing of the ct_scan
'''
def world_2_voxel(world_coordinates, origin, spacing):
    """Convert world (mm) coordinates to voxel indices via origin and spacing.

    Uses the absolute offset from the origin, so it assumes coordinates on the
    same side of the origin as the scan volume.
    """
    offset = np.absolute(world_coordinates - origin)
    return offset / spacing
'''
This function is used to convert the voxel coordinates to world coordinates using
the origin and spacing of the ct_scan.
'''
def voxel_2_world(voxel_coordinates, origin, spacing):
    """Convert voxel indices back to world (mm) coordinates."""
    return voxel_coordinates * spacing + origin
def seq(start, stop, step=1):
    """Inclusive arithmetic sequence from start towards stop in `step` increments.

    The number of steps is rounded from (stop - start) / step; sequences with
    fewer than two steps yield an empty list (matching the original contract).
    """
    count = int(round((stop - start) / float(step)))
    if count <= 1:
        return []
    return [start + step * k for k in range(count + 1)]
'''
This function is used to create spherical regions in binary masks
at the given locations and radius.
'''
def draw_circles(image,cands,origin,spacing):
    """Create a binary mask with a sphere at each candidate nodule location.

    Args:
        image: reference volume (z, y, x); only its shape is used.
        cands: DataFrame-like rows [?, x_mm, y_mm, z_mm, diameter_mm]
            (columns indexed positionally via .values).
        origin, spacing: scan geometry in z, y, x order (mm).

    Returns:
        int16 mask of image.shape with 1s inside each nodule sphere.
    """
    #make empty matrix, which will be filled with the mask
    image_mask = np.zeros(image.shape, dtype=np.int16)
    #run over all the nodules in the lungs
    for ca in cands.values:
        # radius slightly enlarged (circa half a z-step) to capture a wider
        # border for learning and to compensate for the rough net
        radius = (ca[4])/2 + 0.51 * spacing[0]
        coord_x = ca[1]
        coord_y = ca[2]
        coord_z = ca[3]
        image_coord = np.array((coord_z,coord_y,coord_x))
        #determine voxel coordinate given the worldcoordinate
        image_coord = world_2_voxel(image_coord,origin,spacing)
        #determine the world-space sampling range of the nodule per axis
        noduleRange_z = seq(-radius, radius, spacing[0])
        noduleRange_y = seq(-radius, radius, spacing[1])
        noduleRange_x = seq(-radius, radius, spacing[2])
        #create the mask: mark every sampled point within `radius` of the centre
        for x in noduleRange_x:
            for y in noduleRange_y:
                for z in noduleRange_z:
                    coords = world_2_voxel(np.array((coord_z+z,coord_y+y,coord_x+x)),origin,spacing)
                    # distance measured in world units (anisotropic spacing respected)
                    if (np.linalg.norm((image_coord-coords) * spacing)) < radius:
                        image_mask[int(np.round(coords[0])),int(np.round(coords[1])),int(np.round(coords[2]))] = int(1)
    return image_mask
'''
This function takes the path to a '.mhd' file as input and
is used to create the nodule masks and segmented lungs after
rescaling to 1mm size in all directions. It saves them in the .npz
format. It also takes the list of nodule locations in that CT Scan as
input.
'''
def load_scans_masks_or_blanks(luna_subset, useAll, use_unsegmented=True):
    """Load preprocessed LUNA16 lung volumes and nodule masks for a subset.

    A series counts as a "blank" when it has no annotations, no true
    candidates, but some false candidates.  Relies on module-level
    LUNA_BASE_DIR, LUNA_ANNOTATIONS, LUNA_CANDIDATES paths plus glob/pd/np.

    Args:
        luna_subset: subset pattern substituted into LUNA_BASE_DIR (e.g. "[0-6]").
        useAll: if True, load every series, not only annotated/blank ones.
        use_unsegmented: choose the "lungs_2x2x2" (raw lung) variant over the
            "segmented_2x2x2" variant of the preprocessed files.

    Returns:
        (scans, masks, sids, blankids): lists of volumes, masks-with-blank
        markers, series uids, and 0/1 blank flags (1 = blank series).
    """
    LUNA_DIR = LUNA_BASE_DIR % luna_subset
    files = glob.glob(''.join([LUNA_DIR,'*.mhd']))
    annotations = pd.read_csv(LUNA_ANNOTATIONS)
    annotations.head()  # no-op peek, kept from notebook usage
    candidates = pd.read_csv(LUNA_CANDIDATES)
    candidates_false = candidates[candidates["class"] == 0] # only select the false candidates
    candidates_true = candidates[candidates["class"] == 1] # only select the true candidates
    sids = []
    scans = []
    masks = []
    blankids = []   # 1 = blank (no nodule) series, 0 = series with nodules
    cnt = 0
    skipped = 0
    for file in files:
        imagePath = file
        seriesuid = file[file.rindex('/')+1:] # everything after the last slash
        seriesuid = seriesuid[:len(seriesuid)-len(".mhd")] # cut out the suffix to get the uid
        path = imagePath[:len(imagePath)-len(".mhd")] # cut out the suffix to get the uid
        if use_unsegmented:
            path_segmented = path.replace("original_lungs", "lungs_2x2x2", 1)
        else:
            path_segmented = path.replace("original_lungs", "segmented_2x2x2", 1)
        cands = annotations[seriesuid == annotations.seriesuid] # select the annotations for the current series
        ctrue = candidates_true[seriesuid == candidates_true.seriesuid]
        cfalse = candidates_false[seriesuid == candidates_false.seriesuid]
        blankid = 1 if (len(cands) == 0 and len(ctrue) == 0 and len(cfalse) > 0) else 0
        skip_nodules_entirely = False # manual switch: generate only blanks
        use_only_nodules = False      # manual switch: generate only nodules
        if skip_nodules_entirely and blankid ==0:
            print("Skipping nodules (skip_nodules_entirely) ", seriesuid)
            skipped += 1
        elif use_only_nodules and (len(cands) == 0):
            print("Skipping blanks (use_only_nodules) ", seriesuid)
            skipped += 1
        else: # NORMAL operations
            if (len(cands) > 0 or
                (blankid >0) or
                useAll):
                sids.append(seriesuid)
                blankids.append(blankid)
                if use_unsegmented:
                    scan_z = np.load(''.join((path_segmented + '_lung' + '.npz')))
                else:
                    scan_z = np.load(''.join((path_segmented + '_lung_seg' + '.npz')))
                scan = scan_z['arr_0']
                mask_z = np.load(''.join((path_segmented + '_nodule_mask_wblanks' + '.npz')))
                mask = mask_z['arr_0']
                testPlot = False  # debug-only comparison against the plain mask file
                if testPlot:
                    maskcheck_z = np.load(''.join((path_segmented + '_nodule_mask' + '.npz')))
                    maskcheck = maskcheck_z['arr_0']
                    f, ax = plt.subplots(1, 2, figsize=(10,5))
                    ax[0].imshow(np.sum(np.abs(maskcheck), axis=0),cmap=plt.cm.gray)
                    ax[1].imshow(np.sum(np.abs(mask), axis=0),cmap=plt.cm.gray)
                    plt.show()
                scans.append(scan)
                masks.append(mask)
                cnt += 1
            else:
                print("Skipping non-nodules and non-blank entry ", seriesuid)
                skipped += 1
    print ("Summary: cnt & skipped: ", cnt, skipped)
    return scans, masks, sids, blankids
MIN_BOUND = -1000.0  # lower HU clipping bound (air) used by normalize()
MAX_BOUND = 400.0    # upper HU clipping bound (bone/contrast) used by normalize()
def normalize(image, min_bound=None, max_bound=None):
    """Linearly rescale HU values into [0, 1], clipping outside the window.

    Args:
        image: numpy array of HU values (modified copy is returned).
        min_bound, max_bound: HU window; default to the module-level
            MIN_BOUND / MAX_BOUND constants (backward compatible).

    Returns:
        Array of the same shape with values clipped to [0, 1].
    """
    lo = MIN_BOUND if min_bound is None else min_bound
    hi = MAX_BOUND if max_bound is None else max_bound
    image = (image - lo) / (hi - lo)
    image[image > 1] = 1.
    image[image < 0] = 0.
    return image
PIXEL_MEAN = 0.028 ## mean normalized pixel value for LUNA subset 0 with this preprocessing; nodules-only was 0.028, all slices 0.020421744071562546 (the tutorial used 0.25)
def zero_center(image, pixel_mean=None):
    """Subtract the dataset mean pixel value from a normalized image.

    Args:
        image: normalized array (output of normalize()).
        pixel_mean: mean to subtract; defaults to the module-level
            PIXEL_MEAN constant (backward compatible).
    """
    mean = PIXEL_MEAN if pixel_mean is None else pixel_mean
    return image - mean
def convert_scans_and_masks_xd3(scans, masks, only_with_nudels, dim=3, crop=16, blanks_per_axis = 4, add_blank_spacing_size=0, add_blank_layers = 0):
    """Stack per-series slices into dim-deep chunks plus matching 3-D masks.

    NOTE(review): this REDEFINES convert_scans_and_masks_xd3 from earlier in
    this file (cleaned-up copy); being later, this definition is the one in
    effect at runtime.

    Args:
        scans, masks: lists of 3-D arrays (slices, rows, cols), one per series;
            blank (nodule-free) series are marked with negative mask values.
        only_with_nudels: drop chunks whose mask has no non-zero pixel at all.
        dim: chunk depth; should be odd.
        crop: border (in pixels) excluded when placing random blank markers.
        blanks_per_axis: grid density used only by the disabled
            add_random_blanks_in_blanks branch.
        add_blank_spacing_size: if > 0, stamp a random -1 marker pixel every
            this many layers in blank masks.
        add_blank_layers: if > 0, stamp that many random -1 marker layers.

    Returns:
        (scans1, masks1): stacked, normalized, zero-centered float32 chunks;
        the -1 markers are intentionally NOT cleared here.
    """
    # reuse scan to reduce memory footprint
    dim_orig = dim
    skip_low = dim // 2 # dim should be uneven -- it is recalculated anyway to this end
    skip_high = dim -skip_low - 1
    do_not_allow_even_dim = False ## now we allow odd numbers ...
    if do_not_allow_even_dim:
        dim = 2 * skip_low + 1
        skip_low = dim // 2
        skip_high = dim -skip_low - 1
        if dim != dim_orig:
            print ("convert_scans_and_masks_x: Dim must be uneven, corrected from .. to:", dim_orig, dim)
    # build dim-deep chunks for every valid centre slice of every scan
    work = []
    for scan in scans:
        tmp = []
        for i in range(skip_low, scan.shape[0]-skip_high):
            rgb = np.stack(scan[i-skip_low:i+skip_high+1])
            tmp.append(rgb)
        work.append(np.array(tmp))
    scans1 = np.stack([val for sublist in work for val in sublist ]) # NO skipping as we have already cut the first and the last layer
    work = []
    # interior extent available for random marker placement
    dxrange = scans[0].shape[-1] - 2 * crop
    dyrange = scans[0].shape[-2] - 2 * crop
    # stamp artificial -1 marker pixels into blank masks at a regular z-spacing
    if add_blank_spacing_size > 0:
        for mask in masks:
            if (np.min(mask) < 0):
                ## we have a blank
                for i in range(skip_low+(add_blank_spacing_size//2), mask.shape[0]-skip_high, add_blank_spacing_size):
                    mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
    # stamp a fixed number of random marker layers into blank masks
    if add_blank_layers > 0:
        for mask in masks:
            if (np.min(mask) < 0):
                dzrange = mask.shape[0]-dim
                ## we have a blank
                for k in range(add_blank_layers):
                    i = np.random.randint(0, dzrange) + skip_low
                    mask[i, np.random.randint(0,dyrange), np.random.randint(0,dxrange)] = -1 # negative pixel to be picked up below and corrected back to none
    add_random_blanks_in_blanks = False ## NO need for the extra random blank pixels now, 20170327
    if add_random_blanks_in_blanks:
        for mask in masks:
            if (np.min(mask) < 0):
                ## we have a blank: scatter random markers on every blank layer
                pix_sum = np.sum(mask, axis=(1,2))
                idx_blanks = np.min(mask, axis=(1,2)) < 0 ## don't use it - let's vary the position across the space
                for iz in range(mask.shape[0]):
                    if (np.min(mask[iz])) < 0:
                        for ix in range(blanks_per_axis):
                            for iy in range(blanks_per_axis):
                                xpos = crop + np.random.randint(0,dxrange)
                                ypos = crop + np.random.randint(0,dyrange)
                                mask[iz, ypos, xpos] = -1
    use_3d_mask = True ##
    if use_3d_mask:
        # build matching dim-deep mask chunks
        work = [] # 3 layers
        for mask in masks:
            tmp = []
            for i in range(skip_low, mask.shape[0]-skip_high):
                rgb = np.stack(mask[i-skip_low:i+skip_high+1])
                tmp.append(rgb)
            work.append(np.array(tmp))
        masks1 = np.stack([val for sublist in work for val in sublist ] )# NO skipping as we have already cut the first and the last layer
    else:
        masks1 = np.stack([val for sublist in masks for val in sublist[skip_low:-skip_high]] ) # skip one element at the beginning and at the end
    if only_with_nudels:
        if use_3d_mask:
            # ANY non-zero pixel anywhere in the chunk counts (abs also catches -1 markers)
            nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2,3)) ## USE ANY March 1
        else:
            nudels_pix_count = np.sum(np.abs(masks1), axis = (1,2))
        scans1 = scans1[nudels_pix_count != 0]
        masks1 = masks1[nudels_pix_count != 0]
    scans1 = normalize(scans1)
    useTestPlot = False  # debug-only visual sanity check
    if useTestPlot:
        plt.hist(scans1.flatten(), bins=80, color='c')
        plt.xlabel("Hounsfield Units (HU)")
        plt.ylabel("Frequency")
        plt.show()
        for i in range(20):
            print ('scan '+str(i))
            f, ax = plt.subplots(1, 3, figsize=(15,5))
            ax[0].imshow(scans1[i,:,:],cmap=plt.cm.gray)
            ax[1].imshow(scans[i,:,:],cmap=plt.cm.gray)
            ax[2].imshow(masks1[i,:,:],cmap=plt.cm.gray)
            plt.show()
    scans1 = zero_center(scans1)
    scans1 = scans1.astype(np.float32) # make it float 32 (no point carrying 64; keras operates on float32, and originals were int)
    if use_3d_mask:
        done = 1 # nothing to do
    else:
        # reshape 2-D masks to channel-first (N, 1, rows, cols)
        masks = np.copy(masks1)
        masks1=np.zeros((masks.shape[0],1,img_rows,img_cols))
        for i in range(masks.shape[0]):
            img=masks[i,:,:]
            ###img =cv2.resize(img, (img_rows, img_cols)) ## add/test resizing if needed
            masks1[i,0,:,:]=img
    return scans1, masks1
def eliminate_incorrectly_segmented(scans, masks):
    """Drop samples whose masked scan region looks like pure air (bad segmentation).

    NOTE(review): REDEFINES the earlier function of the same name; this copy
    is the one in effect at runtime.  Relies on module-level dim, MIN_BOUND,
    MAX_BOUND, PIXEL_MEAN, plt and scipy stats.

    A non-blank sample is eliminated when the maximum of scan*mask is at or
    below a near-air threshold (about -900 HU on the normalized scale).
    Expects 5-D inputs (N, dim, 1, H, W) -- see the axis=(1,2,3,4) reductions.

    Returns:
        (scans, masks) with the eliminated entries removed.
    """
    skip = dim // 2 # To Change see below ...
    sxm = scans * masks
    near_air_thresh = (-900 - MIN_BOUND) / (MAX_BOUND - MIN_BOUND) - PIXEL_MEAN # version 3 # -750 gives one more (for 0_3, d4; -600 gives 15 more than -900)
    #near_air_thresh #0.08628 for -840 # 0.067 for -867; 0.1148 for -800
    cnt = 0
    # diagnostic pass: report and plot each entry that would be eliminated
    for i in range(sxm.shape[0]):
        sx = sxm[i]
        mx = masks[i]
        if np.sum(mx) > 0: # only check non-blanks ...(keep blanks)
            sx_max = np.max(sx)
            if (sx_max) <= near_air_thresh:
                cnt += 1
                print ("Entry, count # and max: ", i, cnt, sx_max)
                print (stats.describe(sx, axis=None))
                plt.imshow(sx[0,skip], cmap='gray') # selecting the mid entry
                plt.show()
    s_eliminate = np.max(sxm, axis=(1,2,3,4)) <= near_air_thresh # 3d
    s_preserve = np.max(sxm, axis=(1,2,3,4)) > near_air_thresh #3d
    s_eliminate_sum = sum(s_eliminate)
    s_preserve_sum = sum(s_preserve)
    print ("Eliminate, preserve =", s_eliminate_sum, s_preserve_sum)
    masks = masks[s_preserve]
    scans = scans[s_preserve]
    del(sxm)  # free the large product array promptly
    return scans, masks
def grid_data(source, grid=32, crop=16, expand=12):
    """Split a 5-D image stack into overlapping square tiles ("cells").

    The (height, width) plane is divided into a grid of ``grid``-sized
    tiles after discarding a ``crop``-pixel border; each tile is expanded
    by ``expand`` pixels of context on every side.  The cells for all
    tiles are stacked along axis 0, tile by tile (row-major over the tile
    grid, full stack per tile).

    Args:
        source: array of shape (stack, channels, depth, height, width).
        grid:   tile side length in pixels.
        crop:   border excluded from tiling on each side (must be >= expand
                so the expanded slices stay inside the image).
        expand: extra context pixels included around every tile.

    Returns:
        (cells, gridwidth, gridheight) where ``cells`` has shape
        (gridheight * gridwidth * stack, channels, depth,
         grid + 2*expand, grid + 2*expand).
    """
    height = source.shape[3]  # should be 224 for our data
    width = source.shape[4]
    gridheight = (height - 2 * crop) // grid  # tile rows; should be 6 for our data
    gridwidth = (width - 2 * crop) // grid    # tile columns
    cells = []
    for j in range(gridheight):
        for i in range(gridwidth):
            cell = source[:, :, :,
                          crop + j * grid - expand:crop + (j + 1) * grid + expand,
                          crop + i * grid - expand:crop + (i + 1) * grid + expand]
            cells.append(cell)
    cells = np.vstack(cells)
    return cells, gridwidth, gridheight
def data_from_grid (cells, gridwidth, gridheight, grid=32):
    """Reassemble tiled cells back into full-plane images.

    Inverse of the tiling step: the expanded context is trimmed from each
    cell, then the ``gridheight x gridwidth`` tiles belonging to the same
    layer are stitched back together in the (height, width) plane.

    Args:
        cells: array of shape (gridwidth * gridheight * layers, channels,
               depth, cell_h, cell_w), tile-major along axis 0.
        gridwidth, gridheight: tile-grid dimensions used when splitting.
        grid: tile side length; the trim per side is (cell_w - grid) // 2.

    Returns:
        Array of shape (layers, channels, depth,
        gridheight * grid, gridwidth * grid).
    """
    per_layer = gridwidth * gridheight
    trim = (cells.shape[4] - grid) // 2  # same trim vertically and horizontally
    if trim > 0:  # do NOT trim by 0 -- a [0:-0] slice would produce empty cells
        cells = cells[:, :, :, trim:-trim, trim:-trim]
        if trim > 2 * grid:
            print ("data_from_grid Warning, unusually large crop (> 2*grid); crop, & grid, gridwith, gridheight: ", (trim, grid, gridwidth, gridheight))
    # Separate the tile index from the layer index: (per_layer, layers, ...).
    stacked = np.reshape(cells, (per_layer, cells.shape[0] // per_layer) + tuple(cells.shape[1:]))
    # Move the tile axis next to the spatial axes.
    stacked = np.moveaxis(stacked, 0, -3)
    s = stacked.shape
    # Split the flat tile axis into its (row, column) grid coordinates.
    stacked = np.reshape(stacked, tuple(s[0:3]) + (gridheight, gridwidth) + tuple(s[4:]))
    # Interleave tile rows with pixel rows so a single reshape stitches them.
    stacked = stacked.swapaxes(-2, -3)
    s = stacked.shape
    return np.reshape(stacked, tuple(s[0:3]) + (s[-4] * s[-3], s[-2] * s[-1]))
def data_from_grid_by_proximity (cells, gridwidth, gridheight, grid=32):
    """Reassemble cells that were stored layer-major instead of tile-major.

    The sequential cells are first dispersed so that the tile index varies
    fastest (the inverse ordering of ``data_from_grid``'s expectation),
    then the standard ``data_from_grid`` reassembly is applied.
    """
    per_layer = gridwidth * gridheight
    original_shape = cells.shape
    layers = original_shape[0] // per_layer
    # NOTE: the (layers, per_layer) order is deliberately inverted relative
    # to data_from_grid, which is what realises the proximity ordering.
    dispersed = np.reshape(cells, (layers, per_layer) + tuple(original_shape[1:]))
    dispersed = dispersed.swapaxes(0, 1)
    dispersed = np.reshape(dispersed, original_shape)
    return data_from_grid(dispersed, gridwidth, gridheight, grid)
def find_voxels(dim, grid, images3, images3_seg, pmasks3, nodules_threshold=0.999, voxelscountmax = 1000, mid_mask_only = True, find_blanks_also = True, centralcutonly=True):
    """Cut fixed-size (grid x grid) voxel patches around predicted nodules.

    Thresholds the predicted masks ``pmasks3``, labels the connected
    components in 3-D, and for each component extracts a ``grid``-sized
    image patch plus the matching mask patch, clamped to the image borders.

    Args:
        dim: z-depth of each stacked entry; the central cut is dim // 2.
        grid: side length (pixels) of each returned patch.
        images3: scan stack, indexed [entry, 0, z, y, x] -- assumed 5-D; TODO confirm.
        images3_seg: lung-segmented version of images3 (sliced but otherwise unused here).
        pmasks3: predicted nodule probability masks, same layout as images3.
        nodules_threshold: probability cut-off for a pixel to count as nodule.
        voxelscountmax: upper bound on nodules processed per call.
        mid_mask_only: if True, use only the central z cut of each mask.
        find_blanks_also: if True, threshold |p| so strongly negative
            ("blank" marker) values are also picked up.
        centralcutonly: if True, keep a single z-slice per nodule (its centre).

    Returns:
        (voxel_stack, vmask_stack): stacked image/mask patches, or two empty
        lists when nothing exceeded the threshold.

    NOTE(review): depends on module-level names defined elsewhere in this
    file (pix_to_hu, hu_describe, measure, pd, plt, RESIZE_SPACING, uid);
    the disabled testPlot branch also references an undefined
    ``images3_amp``.  Several bare ``x.shape`` expressions are leftover
    debugging statements with no effect.
    """
    zsel = dim // 2
    sstart = 0
    send = images3.shape[0]
    if mid_mask_only:
        pmav = pmasks3[:,0,dim // 2] # using the mid mask
        pmav.shape
    else:
        pmav = pmasks3[:,0] ### NOTE this variant has NOT been tested fully YET
    run_UNNEEDED_code = False
    ims = images3[sstart:send,0,zsel] # selecting the zsel cut for nodules calc ...
    ims_seg = images3_seg[sstart:send,0,zsel]
    ims.shape
    #pms = pmasks3[sstart:send,0,0]
    pms = pmav[sstart:send]
    images3.shape
    thresh = nodules_threshold # for testing , set it here and skip the loop
    segment = 2 # for compatibility of the naming convention
    # threshold the precited nasks ...
    #for thresh in [0.5, 0.9, 0.9999]:
    #for thresh in [0.5, 0.75, 0.9, 0.95, 0.98, 0.99, 0.999, 0.9999, 0.99999, 0.999999, 0.9999999]:
    for thresh in [nodules_threshold]: # jusst this one - keeping loop for a while
        if find_blanks_also:
            idx = np.abs(pms) > thresh
        else:
            idx = pms > thresh
        idx.shape
        # Binary nodule map at this threshold.
        nodls = np.zeros(pms.shape).astype(np.int16)
        nodls[idx] = 1
        nx = nodls[idx]
        nodules_pixels = ims[idx] # flat
        nodules_hu = pix_to_hu(nodules_pixels)
        part_name = ''.join([str(segment), '_', str(thresh)])
        ### DO NOT do them here
        use_corrected_nodules = True # do it below from 20170311
        if not use_corrected_nodules:
            df = hu_describe(nodules_hu, uid=uid, part=part_name)
        add_projections = False
        axis = 1
        nodules_projections = []
        # Max-projections of the nodule map along each axis (diagnostic only
        # unless add_projections is flipped on).
        for axis in range(3):
            nodls_projection = np.max(nodls, axis=axis)
            naxis_name = ''.join(["naxis_", str(axis),"_", part_name])
            if add_projections:
                df[naxis_name] = np.sum(nodls_projection)
            nodules_projections.append(nodls_projection)
        idx.shape
        ## find the individual nodules ... as per the specified probabilities
        labs, labs_num = measure.label(idx, return_num = True, neighbors = 8 , background = 0) # label the nodules in 3d, allow for diagonal connectivity
        voxels = []
        vmasks = []
        if labs_num > 0 and labs.shape[0] >1: # checking for height > 1 is needed as measure.regionprops fails when it is not, for instance for shape (1, 20, 20) we get ValueError: Label and intensity image must have the same shape.
            print("Befpre measure.regionprops, labs & intensity shapes: ", labs.shape, ims.shape)
            regprop = measure.regionprops(labs, intensity_image=ims) # probkem here on 20170327
            voxel_volume = np.product(RESIZE_SPACING)
            areas = [rp.area for rp in regprop] # this is in cubic mm now (i.e. should really be called volume)
            volumes = [rp.area * voxel_volume for rp in regprop]
            # Diameter of the sphere with the same volume (cube-root exponent).
            diameters = [2 * (3* volume / (4 * np.pi ))**0.3333 for volume in volumes]
            labs_ids = [rp.label for rp in regprop]
            #ls = [rp.label for rp in regprop]
            max_val = np.max(areas)
            max_index = areas.index(max_val)
            max_label = regprop[max_index].label
            bboxes = [r.bbox for r in regprop]
            idl = labs == regprop[max_index].label # 400
            nodules_pixels = ims[idl]
            nodules_hu = pix_to_hu(nodules_pixels)
            if run_UNNEEDED_code:
                nodules_hu_reg = []
                for rp in regprop:
                    idl = labs == rp.label
                    nodules_pixels = ims[idl]
                    nodules_hu = pix_to_hu(nodules_pixels)
                    nodules_hu_reg.append(nodules_hu) # NOTE some are out of interest, i.e. are equal all (or near all) to MAX_BOUND (400)
            # One row per labelled nodule region.
            dfn = pd.DataFrame(
                {
                    "area": areas,
                    "diameter": diameters,
                    "bbox": bboxes
                },
                index=labs_ids)
            nodules_count = len(dfn) # 524 for file 1 of part 8 ..
            max_nodules_count = voxelscountmax
            n=0
            for n in range(max_nodules_count):
                if n < len(dfn): # use the nodule data, otheriwse empty
                    bb = dfn.iloc[n]["bbox"]
                    zmin = bb[0]
                    zmax = bb[3]
                    zlen = bb[3] - bb[0]
                    ylen = bb[4] - bb[1]
                    xlen = bb[5] - bb[2]
                    # Centre a grid-wide x-window on the bbox, clamped to [0, width].
                    xmin = np.max([bb[2] - np.max([(grid - xlen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                    xmax = np.min([xmin + grid, ims.shape[2]]) ## do not beyond the right side
                    xmin = xmax - grid
                    if (xmax - xmin) != grid:
                        print ("ERROR in calculating the cut-offs ..., xmin, xmax =", xmin, xmax)
                    # Same clamping for the y-window.
                    ymin = np.max([bb[1] - np.max([(grid - ylen ) //2, 0]), 0]) ## do not go beyond 0/left side of the image
                    ymax = np.min([ymin + grid, ims.shape[1]]) ## do not beyond the right side
                    ymin = ymax - grid
                    if (ymax - ymin) != grid:
                        print ("ERROR in calculating the cut-offs ..., ymin, ymax =", ymin, ymax)
                    zmin_sel = zmin
                    zmax_sel = zmax
                    if centralcutonly: #include only one voxel representation
                        zmin_sel = zmin + zlen // 2
                        zmax_sel = zmin_sel + 1
                    iz=zmin_sel # for testing
                    for iz in range(zmin_sel,zmax_sel):
                        voxel = images3[iz,:,:, ymin:ymax, xmin:xmax]
                        vmask = pmasks3[iz,:,:, ymin:ymax, xmin:xmax]
                        voxels.append(voxel)
                        vmasks.append(vmask)
                        testPlot = False
                        if testPlot:
                            print ('scan '+str(iz))
                            f, ax = plt.subplots(1, 8, figsize=(24,3))
                            ax[0].imshow(nodls[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
                            ax[1].imshow(ims[iz,ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
                            ax[2].imshow(images3_amp[iz,0, dim//2, ymin:ymax, xmin:xmax],cmap=plt.cm.gray)
                            ax[3].imshow(voxel[0,dim//2],cmap=plt.cm.gray)
                            ax[4].imshow(voxel[0,dim],cmap=plt.cm.gray)
                            ax[5].imshow(voxel[0,dim+1],cmap=plt.cm.gray)
                            ax[6].imshow(voxel[0,dim+2],cmap=plt.cm.gray)
                            ax[7].imshow(voxel[0,dim+3],cmap=plt.cm.gray)
        if len(voxels) > 0:
            voxel_stack = np.stack(voxels)
            vmask_stack = np.stack(vmasks)
        else:
            print_warning = False
            if print_warning:
                print("WARNING, find_voxels, not single voxel found even though expected")
            voxel_stack = []
            vmask_stack = []
        # NOTE(review): testPlot here is the value leaked from the inner loop
        # above; if no voxel was extracted this would raise NameError.
        if testPlot:
            print ('voxels count ', len(voxel_stack))
            for ii in range(0,len(voxel_stack),len(voxel_stack)//10):
                f, ax = plt.subplots(1, 2, figsize=(6,3))
                ax[0].imshow(voxel_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
                ax[1].imshow(vmask_stack[ii, 0, dim // 2],cmap=plt.cm.gray)
    return voxel_stack, vmask_stack
def measure_voxels(labs, ims):
    """Summarise labelled nodule regions into a per-region DataFrame.

    Args:
        labs: integer label volume as produced by ``measure.label``.
        ims: intensity image with the same shape as ``labs``.

    Returns:
        DataFrame indexed by region label, with columns ``area`` (voxel
        count), ``diameter`` (sphere-equivalent, derived from the volume)
        and ``bbox``.

    NOTE(review): relies on module globals ``RESIZE_SPACING`` and
    ``pix_to_hu``; ``np.max`` on an empty region list raises, so callers
    presumably guard against an empty labelling -- verify at call sites.
    """
    #print("Befpre measure.regionprops, labs & intensity shapes: ", labs.shape, ims.shape)
    regprop = measure.regionprops(labs, intensity_image=ims) # probkem here on 20170327
    # Physical volume of one voxel in cubic mm.
    voxel_volume = np.product(RESIZE_SPACING)
    areas = [rp.area for rp in regprop] # this is in cubic mm now (i.e. should really be called volume)
    volumes = [rp.area * voxel_volume for rp in regprop]
    # Diameter of the sphere with the same volume (0.3333 approximates 1/3).
    diameters = [2 * (3* volume / (4 * np.pi ))**0.3333 for volume in volumes]
    labs_ids = [rp.label for rp in regprop]
    #ls = [rp.label for rp in regprop]
    max_val = np.max(areas)
    max_index = areas.index(max_val)
    max_label = regprop[max_index].label
    bboxes = [r.bbox for r in regprop]
    #max_ls = ls[max_index]
    # HU statistics of the largest region (computed, currently unused below).
    idl = labs == regprop[max_index].label # 400
    nodules_pixels = ims[idl]
    nodules_hu = pix_to_hu(nodules_pixels)
    run_UNNEEDED_code = False
    if run_UNNEEDED_code:
        nodules_hu_reg = []
        for rp in regprop:
            idl = labs == rp.label
            nodules_pixels = ims[idl]
            nodules_hu = pix_to_hu(nodules_pixels)
            nodules_hu_reg.append(nodules_hu) # NOTE some are out of interest, i.e. are equal all (or near all) to MAX_BOUND (400)
    # One row per labelled region, indexed by the region's label id.
    dfn = pd.DataFrame(
        {
            #"zcenter": zcenters,
            #"ycenter": ycenters,
            #"xcenter": xcenters,
            "area": areas,
            "diameter": diameters,
            #"irreg_vol": irreg_vol,
            #"irreg_shape": irreg_shape,
            #"nodules_hu": nodules_hu_reg,
            "bbox": bboxes
        },
        index=labs_ids)
    return dfn
def find_voxels_and_blanks(dim, grid, images3, images3_seg, pmasks3, nodules_threshold=0.999, voxelscountmax = 1000, find_blanks_also = True, centralcutonly=True, diamin=2, diamax=10):
if np.sum(pmasks3) > 0:
centralcutonly = False # override centralcut for True nodule masks
zsel = dim // 2 if centralcutonly else range(0,dim)
pmav = pmasks3[:,0,zsel]
ims = images3[:,0,zsel] # selecting the zsel cut for nodules calc ...
ims_seg = images3_seg[:,0,zsel]
sstart = 0
send = images3.shape[0]
pms = pmav[sstart:send]
run_UNNEEDED_code = False
thresh = nodules_threshold # for testing , set it here and skip the loop
segment = 2 # for compatibility of the naming convention
for thresh in [nodules_threshold]: # jusst this one - keeping loop for a while
if find_blanks_also:
idx = np.abs(pms) > thresh
else:
idx = pms > thresh
idx.shape
nodls = np.zeros(pms.shape).astype(np.int16)
nodls[idx] = 1
nx = nodls[idx]
volume = np.sum(nodls) # A check calculation ... :wcounted as a count within hu_describe
nodules_pixels = ims[idx] # flat
nodules_hu = pix_to_hu(nodules_pixels)
part_name = ''.join([str(segment), '_', str(thresh)])
### DO NOT do them here
use_corrected_nodules = True # do it below from 20170311
if not use_corrected_nodules:
df = hu_describe(nodules_hu, uid=uid, part=part_name)
add_projections = False
if add_projections:
nodules_projections = []
for axis in range(3):
#sxm_projection = np.max(sxm, axis = axis)
nodls_projection = np.max(nodls, axis=axis)
naxis_name = ''.join(["naxis_", str(axis),"_", part_name])
if add_projections:
df[naxis_name] = | np.sum(nodls_projection) | numpy.sum |
# -*- coding: utf-8 -*-
"""
Batch learner for temporal difference Q learning
Should converge to standard temporal difference Q learning for batchsize=1
"""
import numpy as np
class QBatch():
'''
Nice doc string.
'''
def __init__(self,
obs_action_space,
alpha,
beta,
gamma,
batchsize=1,
Xinit=None,
Qoa=None):
self.alpha = alpha # learning stepsize / rate
self.beta = beta # intensity of choice
self.gamma = gamma # discout factor
# value table: gets updateded while acting with the same policy
self.valQoa = self._init_ObsActionValues(obs_action_space)
# actor table: used for acting, gets updated in learning step
self.actQoa = self._init_ObsActionValues(obs_action_space)
if Xinit is not None and Xinit.shape == self.valQoa.shape:
assert np.allclose(Xinit.sum(-1), 1), 'Xinit must be probabiliy'
self.valQoa = (np.log(Xinit)/self.beta)\
- np.mean( | np.log(Xinit) | numpy.log |
import numpy as np
import pandas as pd
def create_ranked_movies(movies_df: pd.DataFrame, reviews_df: pd.DataFrame,
                         min_ratings: int = 4) -> pd.DataFrame:
    '''
    Rank movies by average rating, breaking ties by rating count and then
    by the most recent review timestamp.

    INPUT
    movies_df - the movies dataframe (must contain movie_id and movie columns)
    reviews_df - the reviews dataframe (must contain movie_id, user_id,
                 rating and timestamp columns)
    min_ratings - a movie must have strictly more than this many ratings
                  to be included (default 4, matching the original behavior)

    OUTPUT
    ranked_movies - a dataframe with columns movie, mean, count, latest_ts,
                    sorted by highest avg rating, then more reviews, then
                    most recent review
    '''
    # Per-movie aggregates: average rating, number of raters, newest review.
    rating_mean = reviews_df.groupby('movie_id')['rating'].mean()
    rating_count = reviews_df.groupby('movie_id')['user_id'].count()
    rating_latest = reviews_df.groupby('movie_id')['timestamp'].max()
    rating_df = pd.concat([rating_mean, rating_count, rating_latest], axis=1)
    rating_df.columns = ['mean', 'count', 'latest_ts']
    # rating_df is indexed by movie_id, hence right_index=True.
    ranked_movies = movies_df.merge(rating_df, left_on='movie_id', right_index=True)
    ranked_movies = ranked_movies.sort_values(["mean", "count", "latest_ts"], ascending=False)
    ranked_movies = ranked_movies[ranked_movies['count'] > min_ratings][["movie", "mean", "count", "latest_ts"]]
    return ranked_movies
def popular_recommendation(n_top:int, ranked_movies:pd.DataFrame):
    '''
    Return the names of the top-ranked movies.

    INPUT:
    n_top - how many recommendations to return
    ranked_movies - DataFrame already sorted by rank (see create_ranked_movies)
    OUTPUT:
    a list with the first n_top entries of the movie column
    '''
    top_titles = ranked_movies['movie'].head(n_top)
    return top_titles.tolist()
def find_similiar_movies(movie_id:int, movies_df:pd.DataFrame) -> str:
'''
INPUT:
movie_id - movie id
movies_df - movie DataFrame
OUTPUT:
result - name of the recommended movie
'''
#get row of given movie_id feature
movie_mat = np.array(movies_df[movies_df['movie_id'] == movie_id].iloc[:,5:])[0]
#get feature matrix of all movies
movies_mat = np.array(movies_df.iloc[:,5:])
#calculate similiarity between given movie and all movie
dot_prod = movie_mat.dot(movies_mat.transpose())
#get the most likely movie
movie_rows = np.where(dot_prod == | np.max(dot_prod) | numpy.max |
"""
linear_network.py
This code is based off of mnielsen's work with a couple of modifications
The original code can be found at https://github.com/mnielsen/neural-networks-and-deep-learning/blob/master/src/network.py
"""
#### Libraries
# Standard library
import random
# Third-party libraries
import numpy as np
class LinearNetwork:
def __init__(self, sizes):
"""The list ``sizes`` contains the number of neurons in the
respective layers of the network. For example, if the list
was [2, 3, 1] then it would be a three-layer network, with the
first layer containing 2 neurons, the second layer 3 neurons,
and the third layer 1 neuron. The biases and weights for the
network are initialized randomly, using a Gaussian
distribution with mean 0, and variance 1. Note that the first
layer is assumed to be an input layer, and by convention we
won't set any biases for those neurons, since biases are only
ever used in computing the outputs from later layers."""
self.num_layers = len(sizes)
self.sizes = sizes
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
def feedforward(self, a):
"""Return the output of the network if ``a`` is input."""
for b, w in zip(self.biases, self.weights):
a = identity_func(np.dot(w, a) + b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
test_data=None):
"""Train the neural network using mini-batch stochastic
gradient descent. The ``training_data`` is a list of tuples
``(x, y)`` representing the training inputs and the desired
outputs."""
if not test_data:
test_data = training_data
n = len(training_data)
for j in range(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in range(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
print("Epoch {0}: {1}".format(j, self.evaluate(test_data)))
def update_mini_batch(self, mini_batch, eta):
"""Update the network's weights and biases by applying
gradient descent using backpropagation to a single mini batch.
The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
is the learning rate."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
x = np.column_stack([example[0] for example in mini_batch])
y = np.column_stack([example[1] for example in mini_batch])
nabla_b, nabla_w = self.backprop(x, y)
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = | np.dot(w, activation) | numpy.dot |
#!usr/bin/python 3.6
#-*-coding:utf-8-*-
'''
@file: da.py, deterministic annealing algorithm
@Author: <NAME> (<EMAIL>)
@Date: 11/28/2019
@Paper reference: Clustering with Capacity and Size Constraints: A Deterministic Approach
'''
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
import collections
import random
from scipy.spatial.distance import cdist
import os
import sys
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(path)
import base
class DeterministicAnnealing(base.Base):
    """Deterministic-annealing clustering with per-cluster size constraints.

    Assigns points softly via a Gibbs distribution whose temperature T is
    lowered over a fixed schedule; Lagrange multipliers (``eta``) steer the
    cluster masses toward the target ``distribution``.
    """
    def __init__(self, n_clusters, distribution, max_iters=1000,
        distance_func=cdist, random_state=42, T=None):
        '''
        Args:
            n_clusters (int): number of clusters
            distribution (list): a list of ratio distribution for each cluster
            T (list): inverse choice of beta coefficients
        '''
        super(DeterministicAnnealing, self).__init__(n_clusters, max_iters, distance_func)
        # Target fraction of points per cluster; must sum to 1 exactly.
        self.lamb = distribution
        assert np.sum(distribution) == 1
        assert len(distribution) == n_clusters
        assert isinstance(T, list) or T is None
        self.beta = None
        self.T = T
        self.cluster_centers_ = None
        self.labels_ = None
        self._eta = None
        self._demands_prob = None
        # Seed both RNGs so initial-center sampling is reproducible.
        random.seed(random_state)
        np.random.seed(random_state)
    def fit(self, X, demands_prob=None):
        """Cluster ``X`` (n_samples, n_features); optional per-point demands.

        Tries a fixed ladder of starting temperatures; for each, anneals
        (T *= 0.999 per iteration) until the capacity constraints are met
        or max_iters is reached.  Stops early once a solution uses exactly
        n_clusters clusters; otherwise keeps the closest attempt.
        """
        # setting T, loop
        T = [1, 0.1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8]
        solutions = []
        diff_list = []
        is_early_terminated = False
        n_samples, n_features = X.shape
        # Absolute capacity (in points) implied by the target distribution.
        self.capacity = [n_samples * d for d in self.lamb]
        if demands_prob is None:
            demands_prob = np.ones((n_samples, 1))
        else:
            demands_prob = np.asarray(demands_prob).reshape((-1, 1))
        assert demands_prob.shape[0] == X.shape[0]
        # Normalize demands to a probability vector over points.
        demands_prob = demands_prob / sum(demands_prob)
        for t in T:
            self.T = t
            centers = self.initial_centers(X)
            eta = self.lamb
            labels = None
            for _ in range(self.max_iters):
                self.beta = 1. / self.T
                distance_matrix = self.distance_func(X, centers)
                # Alternate: multipliers -> soft assignment -> centers.
                eta = self.update_eta(eta, demands_prob, distance_matrix)
                gibbs = self.update_gibbs(eta, distance_matrix)
                centers = self.update_centers(demands_prob, gibbs, X)
                # Geometric cooling schedule.
                self.T *= 0.999
                labels = np.argmax(gibbs, axis=1)
                if self._is_satisfied(labels): break
            solutions.append([labels, centers])
            resultant_clusters = len(collections.Counter(labels))
            diff_list.append(abs(resultant_clusters - self.n_clusters))
            if resultant_clusters == self.n_clusters:
                is_early_terminated = True
                break
        # modification for non-strictly satisfaction, only works for one demand per location
        # labels = self.modify(labels, centers, distance_matrix)
        if not is_early_terminated:
            # Fall back to the attempt whose cluster count was closest.
            best_index = np.argmin(diff_list)
            labels, centers = solutions[best_index]
        self.cluster_centers_ = centers
        self.labels_ = labels
        self._eta = eta
        self._demands_prob = demands_prob
    def predict(self, X):
        """Soft-assign new points with the fitted centers/multipliers and
        return the argmax cluster label per point."""
        distance_matrix = self.distance_func(X, self.cluster_centers_)
        eta = self.update_eta(self._eta, self._demands_prob, distance_matrix)
        gibbs = self.update_gibbs(eta, distance_matrix)
        labels = np.argmax(gibbs, axis=1)
        return labels
    def modify(self, labels, centers, distance_matrix):
        """Greedily move points out of over-capacity clusters into one of the
        two nearest neighbouring clusters until capacities are satisfied.

        NOTE(review): currently unused (commented out in fit); ``diff`` is a
        float since capacities are floats, so the ``[:diff]`` slice looks
        like it would raise -- verify before re-enabling.
        """
        centers_distance = self.distance_func(centers, centers)
        # For each cluster, its two nearest other clusters (skip self at col 0).
        adjacent_centers = {i: np.argsort(centers_distance, axis=1)[i, 1:3].tolist() for i in range(self.n_clusters)}
        while not self._is_satisfied(labels):
            count = collections.Counter(labels)
            cluster_id_list = list(count.keys())
            # Randomize the visiting order so no cluster is systematically favoured.
            random.shuffle(cluster_id_list)
            for cluster_id in cluster_id_list:
                num_points = count[cluster_id]
                diff = num_points - self.capacity[cluster_id]
                if diff <= 0:
                    continue
                adjacent_cluster = None
                adjacent_cluster = random.choice(adjacent_centers[cluster_id])
                if adjacent_cluster is None:
                    continue
                cluster_point_id = np.where(labels==cluster_id)[0].tolist()
                # Cost of moving each point: distance gained by switching cluster.
                diff_distance = distance_matrix[cluster_point_id, adjacent_cluster] \
                    - distance_matrix[cluster_point_id, cluster_id]
                # Move the points whose reassignment is cheapest.
                remove_point_id = np.asarray(cluster_point_id)[np.argsort(diff_distance)[:diff]]
                labels[remove_point_id] = adjacent_cluster
        return labels
    def initial_centers(self, X):
        """Pick n_clusters distinct rows of X uniformly at random as centers."""
        selective_centers = random.sample(range(X.shape[0]), self.n_clusters)
        centers = X[selective_centers]
        return centers
    def _is_satisfied(self, labels):
        """True iff every cluster is non-empty and within its capacity."""
        count = collections.Counter(labels)
        for cluster_id in range(len(self.capacity)):
            if cluster_id not in count:
                return False
            num_points = count[cluster_id]
            if num_points > self.capacity[cluster_id]:
                return False
        return True
    def update_eta(self, eta, demands_prob, distance_matrix):
        """Update the per-cluster Lagrange multipliers so the expected
        cluster masses match the target distribution ``self.lamb``."""
        n_points, n_centers = distance_matrix.shape
        eta_repmat = np.tile(np.asarray(eta).reshape(1, -1), (n_points, 1))
        exp_term = np.exp(- self.beta * distance_matrix)
        # Normalized Boltzmann factors per point (row-wise).
        divider = exp_term / np.sum(np.multiply(exp_term,
            eta_repmat), axis=1).reshape((-1, 1))
        eta = np.divide(np.asarray(self.lamb),
            np.sum(divider * demands_prob, axis=0))
        return eta
    def update_gibbs(self, eta, distance_matrix):
        """Return the (n_points, n_clusters) soft-assignment matrix: rows are
        Gibbs distributions weighted by the multipliers ``eta``."""
        n_points, n_centers = distance_matrix.shape
        eta_repmat = np.tile(np.asarray(eta).reshape(1, -1), (n_points, 1))
        exp_term = np.exp(- self.beta * distance_matrix)
        factor = np.multiply(exp_term, eta_repmat)
        gibbs = factor / np.sum(factor, axis=1).reshape((-1, 1))
        return gibbs
    def update_centers(self, demands_prob, gibbs, X):
        """Recompute centers as the demand-weighted soft means of the points."""
        n_points, n_features = X.shape
        divide_up = gibbs.T.dot(X * demands_prob)# n_cluster, n_features
        p_y = np.sum(gibbs * demands_prob, axis=0) # n_cluster,
        p_y_repmat = np.tile(p_y.reshape(-1, 1), (1, n_features))
        centers = np.divide(divide_up, p_y_repmat)
        return centers
if __name__ == "__main__":
X = []
n_points = 1000
random_state = 42
random.seed(random_state)
np.random.seed(random_state)
# demands = np.random.randint(1, 24, (n_points, 1))
X = np.random.rand(n_points, 2)
demands = | np.ones((n_points, 1)) | numpy.ones |
import numpy as np
import scipy
from numpy.fft import rfft,irfft
import os
import time
import librosa
from Audio_proc_lib.audio_proc_functions import *
import multiprocessing
import scipy.signal as sg
class scale_frame:
#FOR THE IMPLEMENTATION OF THE IRREGULAR MATRIX i assumed that Ln (window len) = Mn (FFT len)
#Painless case Ln<=Mn
#CONSTRUCTOR PARAMETERS
#1)ksi_s : sampling rate
#2)min_scl : minimal scale given in samples
#3)overlap_factor : the amount of overlap each new constructed window will have to its previous one (and the next one) given as a ratio
# Notes-> i.e. overlap_factor of 1/2 means that if the previous window is 512samples then the next one will overlap in 256samples (similar to hop size in STFT)
# For the first and the last windowes we used a tukey window and an overlap of 1/2 .
#4)onset_seq : The sequence of onsets produced by an onset detection algorithm
#5)middle_window : The middle window used in each get_window_interval procedure given as an object i.e. np.hanning or scipy.signal.tukey
#6)L : signal length in samples
#7)matrix_form : flag to indicate if will be calculated a regular matrix or irregular matrix
#8)multiproc : flag to indicate if it will use multiprocessing to compute the window for each onset interval indices in the get_window_interval procedure
# (recommended True)
def timeis(func):
'''Decorator that reports the execution time.'''
def wrap(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
print(func.__name__, end-start)
return result
return wrap
def cputime(self):
utime, stime, cutime, cstime, elapsed_time = os.times()
return utime
g = []
g_dual = []
    def __init__(self,ksi_s,min_scl,overlap_factor,onset_seq,middle_window,L,matrix_form,multiproc):
        """Build the adaptive window layout and the frame operator.

        Args:
            ksi_s: sampling rate in Hz.
            min_scl: minimal window scale in samples (used at onsets).
            overlap_factor: overlap ratio between successive windows.
            onset_seq: onset positions (samples) from an onset detector.
            middle_window: window factory for each interval's middle window
                (e.g. np.hanning or scipy.signal.tukey).
            L: signal length in samples.
            matrix_form: True -> regular (zero-padded) matrix transform,
                False -> irregular per-window lengths.
            multiproc: if True, compute per-interval window indices with a
                multiprocessing pool of 4 workers.

        NOTE(review): the Pool is never closed/joined here -- presumably
        acceptable for one-shot scripts; confirm for long-running use.
        """
        self.ksi_s = ksi_s
        self.onsets = onset_seq
        self.min_scl=min_scl
        self.overlap_factor = overlap_factor
        self.multiprocessing = multiproc
        self.middle_window = middle_window
        self.L=L
        self.matrix_form=matrix_form
        #writing in the correct order the function calls in order for the FORWARD AND BACKWARD methods to work
        #Creating the onset_tuples sequence
        self.get_onset_tuples()
        #Construction of the windows indices
        if self.multiprocessing:
            pool = multiprocessing.Pool(processes=4)
            all_inds_list = list( pool.imap(self.get_windows_interval, self.onset_tuples) )
        else:
            all_inds_list = list( map( lambda x : self.get_windows_interval(x) , self.onset_tuples ) )
        # Flatten the per-interval window lists into one ordered list.
        self.all_inds = []
        for interval_inds in all_inds_list:
            self.all_inds += interval_inds
        self.get_first_last_window()
        # Total number of analysis windows.
        self.N = len(self.all_inds)
        self.get_frame_operator()
def get_onset_tuples(self):
#onsets = librosa.onset.onset_detect(y=sig, sr=self.ksi_s, units="samples")
#putting manualy some onsets in the start and the end
#and then creating a sequence of onset tuples (each tuple contains two successive onsets)
self.onsets = np.insert( self.onsets , [0,len(self.onsets)] , [self.min_scl,(self.L-1)-self.min_scl] )
self.onset_tuples = []
for i in range(len(self.onsets)-1):
self.onset_tuples.append( (self.onsets[i],self.onsets[i+1]) )
    def get_windows_interval(self,onset_tuple):
        """Build the window index records for one onset interval.

        Returns a list of dicts {"window", "win_len", "a", "b"} where a/b
        are the window's start/end sample indices.
        """
        #Function to get the window start (a) , end (b) indices and window length
        #for the windows between 2 onsets
        #Params:
        #1)onsets_tuple: the first and last onset for the interval under considaration
        #2)self.min_scl: is the minimal scale that we apply to the two onsets (because they are the transient positions) (POWER OF 2)
        #3)overlap_fact: the amount of the previous window that the next will overlap to the previous (must be a fraction greater than 1)
        #Idea implemented:
        #In the first onset we use the minimal scale and for the following windows we increase the scale by doubling it each time
        # until the median (end + start)/2 of the interval . We use the symmetric windows in order to reash gradually the minimal
        # scale again in the position of the second onset. For the median position we use another window.
        #
        #Constructing the windows for all onset intervals-----------------------------------------------------------------------------------
        start = onset_tuple[0]
        end = onset_tuple[1]
        middle = (start + end )//2
        win_len = self.min_scl
        #Constructing the first symmetric windows--------------------------------------------------------------------
        # Seed window: minimal scale, centered on the first onset.
        inds_dict = [ { "window" : np.hanning , "win_len" : win_len , "a" : start - win_len//2 , "b" : start + win_len//2 } ]
        k = 0
        # Keep doubling the window length (overlapping the previous window)
        # until the next window would cross the interval's midpoint.
        while True:
            k+=1
            ovrlp = int(inds_dict[k-1]["win_len"]*self.overlap_factor)
            window = np.hanning
            win_len = win_len*2
            a = inds_dict[k-1]["b"] - ovrlp
            b = a + win_len
            if b>middle:
                break
            # if (a+b)/2>middle:
            # break
            else:
                inds_dict.append( { "window" : window , "win_len" : win_len , "a" : a , "b" : b } )
        #Constructing the middle window---------------------------------------------------------------------------------------
        # Bridges the gap around the midpoint; its end mirrors the last
        # ascending window about the midpoint (plus the overlap).
        window = self.middle_window
        ovrlp = int(inds_dict[-1]["win_len"]*self.overlap_factor)
        a = inds_dict[-1]["b"] - ovrlp
        b = int( 2*middle - inds_dict[-1]["b"] ) + ovrlp
        win_len = b - a
        inds_dict.append( { "window" : window , "win_len" : win_len , "a" : a , "b" : b } )
        #Constructing the first symmetric windows --------------------------------------------------------------------------------
        # (we dont need the last symmetric window thats why the for loop goes until 0 )
        # Mirror the ascending windows about the midpoint so the scale
        # shrinks back to min_scl toward the second onset.
        for k in range(len(inds_dict)-2,0,-1):
            tmp = inds_dict[k].copy()
            tmp["a"] = int( 2*middle - inds_dict[k]["b"] )
            tmp["b"] = int( 2*middle - inds_dict[k]["a"] )
            inds_dict.append(tmp)
        return inds_dict
def get_first_last_window(self):
#first_window
ovrlp = int(self.all_inds[0]["win_len"]*self.overlap_factor)
ovrlp = int(self.all_inds[0]["win_len"]*(1/2))
a = 0
b = self.all_inds[0]["a"] + ovrlp
win_len = b - a
first_window_inds = { "win_len" : win_len , "a" : a , "b" : b }
#last_window
#ovrlp = int(self.all_inds[len(self.all_inds)-1]["win_len"]*self.overlap_factor)
ovrlp = int(self.all_inds[len(self.all_inds)-1]["win_len"]*(1/2))
a = self.all_inds[len(self.all_inds)-1]["b"] - ovrlp
b = self.L
win_len = b - a
last_window_inds = { "win_len" : win_len , "a" : a , "b" : b }
self.all_inds = [first_window_inds] + self.all_inds + [last_window_inds]
    def plot_windows(self):
        """Plot every analysis window over the signal's sample axis.

        Only runs for short signals (<= 7 s at an assumed 44100 Hz sample
        rate -- NOTE(review): hard-coded, ignores self.ksi_s; confirm).
        First and last windows are half-Tukey tapers; the interior windows
        use each entry's own "window" factory.  Requires ``plt``
        (matplotlib) to be available at module level.
        """
        #Plot the windows for a small 3sec exerpt of the signal
        if self.L/44100<=7.0:
            #first window using Tukey
            # Right half of a double-length Tukey window (fade-out only).
            z_tmp = np.zeros(self.L)
            inds = np.arange( self.all_inds[0]["a"],self.all_inds[0]["b"] )
            Ln = self.all_inds[0]["win_len"]
            gn = np.roll( sg.tukey( Ln*2 ) , Ln )[:Ln]
            z_tmp[inds] = gn
            plt.plot(z_tmp)
            # Interior windows: each drawn on its own zeroed buffer.
            for k in range(1,self.N-1):
                z_tmp = np.zeros(self.L)
                inds = np.arange( self.all_inds[k]["a"],self.all_inds[k]["b"] )
                z_tmp[inds] = self.all_inds[k]["window"]( self.all_inds[k]["win_len"] )
                plt.plot(z_tmp)
            #last window using Tukey
            # Left half of a double-length Tukey window (fade-in only).
            z_tmp = np.zeros(self.L)
            inds = np.arange( self.all_inds[self.N-1]["a"],self.all_inds[self.N-1]["b"] )
            Ln = self.all_inds[self.N-1]["win_len"]
            gn = np.roll( sg.tukey( Ln*2 ) , Ln )[Ln:]
            z_tmp[inds] = gn
            plt.plot(z_tmp)
            plt.show()
        # plt.axvline(start)
        # plt.axvline(end)
        # plt.axvline(middle)
        # plt.show()
    def get_frame_operator(self):
        """Accumulate the diagonal frame operator: sum of squared windows.

        Fills ``self.frame_operator`` (length L) with sum_n g_n(t)^2, which
        is the painless-case frame operator used to build the dual windows.
        In matrix form every window is zero-padded to the maximal length
        ``self.M``; windows in the first half of the list are padded on the
        right, those in the second half on the left (so the padded index
        ranges stay inside [0, L)).  First/last windows are half-Tukey
        tapers generated on the fly.
        """
        #CONSTRUCTING THE FRAME OPERATOR-----------------------------------------------
        self.frame_operator = np.zeros(self.L)
        #MATRIX FORM CASE:
        if self.matrix_form:
            #calculate the max window length:
            self.M = np.array( list( map( lambda x : x["win_len"] , self.all_inds ) ) ).max()
            #first window using Tukey
            # Right half of a double-length Tukey, zero-padded on the right.
            nb_zeros_concat = self.M-self.all_inds[0]["win_len"]
            bnew = self.all_inds[0]["b"] + nb_zeros_concat
            inds = np.arange( self.all_inds[0]["a"],bnew )
            Ln = self.all_inds[0]["win_len"]
            gn = np.roll( sg.tukey( Ln*2 ) , Ln )[:Ln]
            gn = np.concatenate(( gn,np.zeros(nb_zeros_concat) ))
            self.frame_operator[ inds ] += (gn**2)
            #The remaining windows--------------------------------------------------------------------
            # First half of the windows: pad on the right, keep 'a' fixed.
            for n in range(1,self.N//2):
                nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
                bnew = self.all_inds[n]["b"] + nb_zeros_concat
                inds = np.arange( self.all_inds[n]["a"],bnew )
                Ln = self.all_inds[n]["win_len"]
                gn = self.all_inds[n]["window"]( Ln )
                gn = np.concatenate(( gn,np.zeros(nb_zeros_concat) ))
                self.frame_operator[ inds ] += (gn**2)
            #After the self.N//2 window we update the a inds in order to avoid indices problems out of range
            # Second half: pad on the left, keep 'b' fixed.
            for n in range(self.N//2,self.N-1):
                nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
                anew = self.all_inds[n]["a"] - nb_zeros_concat
                inds = np.arange( anew,self.all_inds[n]["b"] )
                Ln = self.all_inds[n]["win_len"]
                gn = self.all_inds[n]["window"]( Ln )
                gn = np.concatenate(( np.zeros(nb_zeros_concat),gn ))
                self.frame_operator[ inds ] += (gn**2)
            #last window using Tukey
            # Left half of a double-length Tukey, zero-padded on the left.
            nb_zeros_concat = self.M-self.all_inds[self.N-1]["win_len"]
            anew = self.all_inds[self.N-1]["a"] - nb_zeros_concat
            inds = np.arange( anew,self.all_inds[self.N-1]["b"] )
            Ln = self.all_inds[self.N-1]["win_len"]
            gn = np.roll( sg.tukey( Ln*2 ) , Ln )[Ln:]
            gn = np.concatenate(( np.zeros(nb_zeros_concat) ,gn ))
            self.frame_operator[ inds ] += (gn**2)
        #IRREGULAR MATRIX CASE:
        else:
            #first window using Tukey
            inds = np.arange( self.all_inds[0]["a"],self.all_inds[0]["b"] )
            Ln = self.all_inds[0]["win_len"]
            gn = np.roll( sg.tukey( Ln*2 ) , Ln )[:Ln]
            self.frame_operator[ inds ] += (gn**2)
            #The remaining windows
            for n in range(1,self.N-1):
                inds = np.arange( self.all_inds[n]["a"],self.all_inds[n]["b"] )
                Ln = self.all_inds[n]["win_len"]
                gn = self.all_inds[n]["window"]( Ln )
                self.frame_operator[ inds ] += (gn**2)
            #last window using Tukey
            inds = np.arange( self.all_inds[self.N-1]["a"],self.all_inds[self.N-1]["b"] )
            Ln = self.all_inds[self.N-1]["win_len"]
            gn = np.roll( sg.tukey( Ln*2 ) , Ln )[Ln:]
            self.frame_operator[ inds ] += (gn**2)
    @timeis
    def forward(self,signal):
        """Analysis transform: window each frame of ``signal`` and rFFT it.

        Frame bounds come from ``self.all_inds`` (keys ``a``/``b`` give the
        sample range, ``win_len`` the window length, ``window`` the window
        callable).  The first and last frames use half-Tukey tapers built by
        rolling a double-length Tukey window; interior frames use their stored
        window.  In matrix form every frame is zero-padded to the common
        length ``self.M`` (on the right for the first half of the frames, on
        the left for the second half) so all coefficient vectors have equal
        length.  This mirrors exactly the index bookkeeping used to build
        ``self.frame_operator``.

        Parameters
        ----------
        signal : ndarray
            Time-domain samples; indexed with the ranges in ``self.all_inds``.

        Returns
        -------
        list of ndarray
            One orthonormalized ("ortho") rFFT coefficient vector per frame
            (``self.N`` entries).
        """
        c = []
        #MATRIX FORM CASE:
        if self.matrix_form:
            #first window using Tukey
            # second half of a length-2*Ln Tukey (full at the signal start,
            # tapering at the right edge), right-padded with zeros to self.M
            nb_zeros_concat = self.M-self.all_inds[0]["win_len"]
            bnew = self.all_inds[0]["b"] + nb_zeros_concat
            inds = np.arange( self.all_inds[0]["a"],bnew )
            fft_len = self.all_inds[0]["win_len"]
            gn = np.roll( sg.tukey( fft_len*2 ) , fft_len )[:fft_len]
            gn = np.concatenate(( gn,np.zeros(nb_zeros_concat) ))
            c.append( rfft( signal[inds]*gn , norm="ortho" ) )
            #The remaining windows----------------------------------------------------------------------------------------
            # first half of the frames: extend b and pad on the right
            for n in range(1,self.N//2):
                nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
                bnew = self.all_inds[n]["b"] + nb_zeros_concat
                inds = np.arange( self.all_inds[n]["a"],bnew )
                fft_len = self.all_inds[n]["win_len"]
                gn = self.all_inds[n]["window"](fft_len)
                gn = np.concatenate(( gn,np.zeros(nb_zeros_concat) ))
                c.append( rfft( signal[inds]*gn , norm="ortho" ) )
            #After the self.N//2 window we update the a inds in order to avoid indices problems out of range
            # second half of the frames: pull a back and pad on the left,
            # so the index range never runs past the end of the signal
            for n in range(self.N//2,self.N-1):
                nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
                anew = self.all_inds[n]["a"] - nb_zeros_concat
                inds = np.arange( anew,self.all_inds[n]["b"] )
                fft_len = self.all_inds[n]["win_len"]
                gn = self.all_inds[n]["window"](fft_len)
                gn = np.concatenate(( np.zeros(nb_zeros_concat),gn ))
                c.append( rfft( signal[inds]*gn , norm="ortho" ) )
            #last window using Tukey
            # first half of a length-2*Ln Tukey (rises from zero, full at the
            # signal end), left-padded with zeros to self.M
            nb_zeros_concat = self.M-self.all_inds[self.N-1]["win_len"]
            anew = self.all_inds[self.N-1]["a"] - nb_zeros_concat
            inds = np.arange( anew,self.all_inds[self.N-1]["b"] )
            fft_len = self.all_inds[self.N-1]["win_len"]
            gn = np.roll( sg.tukey( fft_len*2 ) , fft_len )[fft_len:]
            gn = np.concatenate(( np.zeros(nb_zeros_concat) ,gn ))
            c.append( rfft( signal[inds]*gn , norm="ortho" ) )
        #IRREGULAR MATRIX CASE:
        else:
            # no padding: each frame keeps its native win_len
            #first window using Tukey
            inds = np.arange( self.all_inds[0]["a"],self.all_inds[0]["b"] )
            fft_len = self.all_inds[0]["win_len"]
            gn = np.roll( sg.tukey( fft_len*2 ) , fft_len )[:fft_len]
            c.append( rfft( signal[inds]*gn , norm="ortho" ) )
            #The remaining windows
            for n in range(1,self.N-1):
                fft_len = self.all_inds[n]["win_len"]
                inds = np.arange(self.all_inds[n]["a"],self.all_inds[n]["b"])
                gn = self.all_inds[n]["window"](fft_len)
                c.append( rfft( signal[inds]*gn , norm="ortho" ) )
            #last window using Tukey
            inds = np.arange( self.all_inds[self.N-1]["a"],self.all_inds[self.N-1]["b"] )
            fft_len = self.all_inds[self.N-1]["win_len"]
            gn = np.roll( sg.tukey( fft_len*2 ) , fft_len )[fft_len:]
            c.append( rfft( signal[inds]*gn , norm="ortho" ) )
        return c
@timeis
def backward(self,c):
f_rec = np.zeros(self.L)
if self.matrix_form:
#first window using Tukey
nb_zeros_concat = self.M-self.all_inds[0]["win_len"]
bnew = self.all_inds[0]["b"] + nb_zeros_concat
inds = np.arange( self.all_inds[0]["a"],bnew )
fft_len = self.all_inds[0]["win_len"]
fn = np.real( irfft( c[0] , norm="ortho" ) )
gn_dual = np.roll( sg.tukey( fft_len*2 ) , fft_len )[:fft_len]
gn_dual = np.concatenate(( gn_dual,np.zeros(nb_zeros_concat) ))/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
for n in range(1,self.N//2):
nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
bnew = self.all_inds[n]["b"] + nb_zeros_concat
inds = np.arange( self.all_inds[n]["a"],bnew )
fft_len = self.all_inds[n]["win_len"]
fn = np.real( irfft( c[n] , norm="ortho" ) )
gn_dual = self.all_inds[n]["window"](fft_len)
gn_dual = np.concatenate(( gn_dual,np.zeros(nb_zeros_concat) ))/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
#After the self.N//2 window we update the a inds in order to avoid indices problems out of range
for n in range(self.N//2,self.N-1):
nb_zeros_concat = self.M-self.all_inds[n]["win_len"]
anew = self.all_inds[n]["a"] - nb_zeros_concat
inds = np.arange( anew,self.all_inds[n]["b"] )
fft_len = self.all_inds[n]["win_len"]
fn = np.real( irfft( c[n] , norm="ortho" ) )
gn_dual = self.all_inds[n]["window"](fft_len)
gn_dual = np.concatenate(( np.zeros(nb_zeros_concat),gn_dual ))/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
#last window using Tukey
nb_zeros_concat = self.M-self.all_inds[self.N-1]["win_len"]
anew = self.all_inds[self.N-1]["a"] - nb_zeros_concat
inds = np.arange( anew,self.all_inds[self.N-1]["b"] )
fft_len = self.all_inds[self.N-1]["win_len"]
fn = np.real( irfft( c[self.N-1] , norm="ortho" ) )
gn_dual = np.roll( sg.tukey( fft_len*2 ) , fft_len )[fft_len:]
gn_dual = np.concatenate(( np.zeros(nb_zeros_concat),gn_dual ))/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
else:
#self.get_frame_operator()
#first window using Tukey
inds = np.arange( self.all_inds[0]["a"],self.all_inds[0]["b"] )
fft_len = self.all_inds[0]["win_len"]
fn = np.real( irfft( c[0] , norm="ortho" ) )
gn_dual = np.roll( sg.tukey( fft_len*2 ) , fft_len )[:fft_len]/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
for n in range(1,self.N-1):
fft_len = self.all_inds[n]["win_len"]
inds = np.arange(self.all_inds[n]["a"],self.all_inds[n]["b"])
fn = np.real( irfft( c[n] , norm="ortho" ) )
gn_dual = self.all_inds[n]["window"](fft_len)/self.frame_operator[inds]
f_rec[inds] += fn*gn_dual
#last window using Tukey
inds = | np.arange( self.all_inds[self.N-1]["a"],self.all_inds[self.N-1]["b"] ) | numpy.arange |
import numpy as np
import csv
import math
class Logger:
def __init__(self, path, num_classes, seq_len):
self.acc_plot = []
self.conf_mat = None
self.len_plot = None
self.len_stats = None
self.len_buckets = None
self.path = path
self.num_classes = num_classes
self.seq_len = seq_len
#in case you want to record accuracy during training. Not used currently.
def record_val_acc(self, time, acc):
self.acc_plot.append([time, acc])
#data: (x, y) pairs before onehot encoding, pred: predicted class integers
def confusion_matrix(self, data, pred):
self.conf_mat = np.zeros((self.num_classes, self.num_classes), dtype=np.int32)
for i in range(len(data)):
self.conf_mat[data[i][1], pred[i]] += 1
#no longer used, replaced by length_histograms
def length_plot(self, data, pred):
lengths = []
for (x, y) in data:
lengths.append(len(x))
self.len_plot = []
for i in range(len(data)):
correct = 1 if data[i][1] == pred[i] else 0
self.len_plot.append([lengths[i], correct])
def length_stats(self, data, pred):
class_lengths = []
for i in range(self.num_classes):
class_lengths.append([])
for (x, y) in data:
class_lengths[y].append(len(x))
self.len_stats = []
for i in range(self.num_classes):
l = class_lengths[i]
self.len_stats.append([i, np.mean(l), np.median(l), np.std(l), | np.var(l) | numpy.var |
"""
isicarchive.imfunc
This module provides image helper functions and doesn't have to be
imported from outside the main package functionality (IsicApi).
Functions
---------
color_superpixel
Paint the pixels belong to a superpixel list in a specific color
column_period
Guess periodicity of data (image) column
display_image
Display an image (in a Jupyter notebook!)
image_compose
Compose an image from parts
image_corr
Correlate pixel values across two images
image_crop
Crop an image according to coordinates (or superpixel index)
image_dice
Compute DICE coefficient of two images
image_gradient
Compute image gradient (and components)
image_gray
Generate gray-scale version of image
image_mark_border
Mark border pixels of image with encoded content (string, bytes)
image_mark_pixel
Mark pixel in image border
image_mark_work
Mark set of pixels (word) in image border
image_mix
Mix two (RGB or gray) image, with either max or blending
image_overlay
Mix an RGB image with a heatmap overlay (resampled)
image_read_border
Read encoded image border
image_register
Perform rigid-body alignment of images based on gradient
image_resample
Cheap (!) resampling of an image
image_rotate
Rotate an image (ndarray)
lut_lookup
Color lookup from a table (LUT)
segmentation_outline
Extract outline from a segmentation mask image
superpixel_dice
Compute DICE coefficient for superpixel lists
superpixel_neighbors
Generate neighbors lists for each superpixel in an image
superpixel_outlines
Extract superpixel (outline) shapes from superpixel map
superpixel_values
Return the values of a superpixel
write_image
Write an image to file or buffer (bytes)
"""
# specific version for file
__version__ = '0.4.11'
# imports (needed for majority of functions)
from typing import Any, List, Optional, Tuple, Union
import warnings
import numpy
from .vars import ISIC_DICE_SHAPE, ISIC_FUNC_PPI, ISIC_IMAGE_DISPLAY_SIZE_MAX
# color superpixels in an image
def color_superpixels(
    image:Union[numpy.ndarray, Tuple],
    splst:Union[list, numpy.ndarray],
    spmap:numpy.ndarray,
    color:Union[list, numpy.ndarray],
    alpha:Union[float, numpy.floating, list, numpy.ndarray] = 1.0,
    almap:numpy.ndarray = None,
    spval:Union[float, numpy.floating, list, numpy.ndarray, None] = None,
    copy_image:bool = False) -> numpy.ndarray:
    """
    Paint the pixels belong to a superpixel list in a specific color.

    Parameters
    ----------
    image : numpy.ndarray or 2- or 3-element Tuple with image size
        Image to be colored, if shape tuple, will be all 0 (black)
    splst : list or flat numpy.ndarray
        List of superpixels to color in the image
    spmap : numpy.ndarray
        Mapping array from func.superpixels_map(...)
    color : either a list or numpy.ndarray
        RGB Color code or list of codes to use to color superpixels
    alpha : either float or numpy scalar float value or None
        Alpha (opacity) value between 0.0 and 1.0, if None, set to 1.0
    almap : optional numpy.ndarray
        Alpha map (same pixel count as image); updated in place with the
        composited opacity of the painted superpixels
    spval : optional numpy.ndarray
        Per-pixel opacity value (e.g. confidence, etc.)
    copy_image : bool
        Copy the input image prior to painting, default: False

    Returns
    -------
    image : numpy.ndarray
        Image with superpixels painted
    """
    # NOTE: numpy.float was removed in NumPy 1.24; all uses were replaced
    # with float / numpy.floating / numpy.float64 (behavior-compatible).
    # check inputs
    if isinstance(image, tuple):
        if len(image) == 2 and (isinstance(image[0], int) and
            isinstance(image[1], int)):
            im_shape = image
            image = numpy.zeros(image[0] * image[1], dtype=numpy.uint8)
        elif len(image) == 3 and (isinstance(image[0], int) and
            isinstance(image[1], int) and isinstance(image[2], int) and
            (image[2] == 1 or image[2] == 3)):
            im_shape = image
            image = numpy.zeros(image[0] * image[1] * image[2],
                dtype=numpy.uint8).reshape((image[0] * image[1], image[2]))
        else:
            raise ValueError('Invalid image shape.')
        copy_image = False
    else:
        im_shape = image.shape
    num_cols = im_shape[1]
    has_almap = False
    if not almap is None:
        if almap.size != (im_shape[0] * im_shape[1]):
            raise ValueError('Invalid alpha map.')
        has_almap = True
        am_shape = almap.shape
        try:
            almap.shape = (almap.size,)
        except:
            raise
    if copy_image:
        image = numpy.copy(image)
    # determine number of (color) planes and flatten spatial dims
    if len(im_shape) == 3 or im_shape[1] > 3:
        planes = im_shape[2] if len(im_shape) == 3 else 1
    else:
        if len(im_shape) > 1:
            planes = im_shape[1]
        else:
            planes = 1
    image.shape = (im_shape[0] * im_shape[1], planes)
    has_alpha = False
    if planes > 3:
        planes = 3
        has_alpha = True
    numsp = len(splst)
    # per-superpixel opacity values; default to fully opaque
    if spval is None:
        spval = numpy.ones(numsp, dtype=numpy.float32)
    elif isinstance(spval, (float, numpy.floating)):
        spval = spval * numpy.ones(numsp, dtype=numpy.float32)
    elif len(spval) != numsp:
        spval = numpy.ones(numsp, dtype=numpy.float32)
    # single RGB triple -> replicate for all superpixels
    if len(color) == 3 and isinstance(color[0], int):
        color = [color] * numsp
    if alpha is None:
        alpha = 1.0
    if isinstance(alpha, float):
        alpha = [alpha] * numsp
    if isinstance(alpha, list):
        if len(alpha) != numsp:
            raise ValueError('alpha list must match number of superpixels')
    # stripe width (in diagonal pixel index units) for multi-color superpixels
    sp_skip = 6.0 * numpy.trunc(0.75 + 0.25 * numpy.sqrt([
        im_shape[0] * im_shape[1] / spmap.shape[0]]))[0]
    # for each superpixel (index)
    for idx in range(numsp):
        # get pixel indices, compute inverse alpha, and then set pixel values
        spcol = color[idx]
        singlecol = False
        num_colors = 1
        if isinstance(spcol, list):
            if isinstance(spcol[0], int):
                singlecol = True
            else:
                num_colors = len(spcol)
        elif isinstance(spcol, numpy.ndarray):
            if spcol.size == 3:
                singlecol = True
            else:
                num_colors = spcol.shape[0]
        if num_colors > 6:
            num_colors = 6
        spalpha = alpha[idx]
        if isinstance(spalpha, float) and not singlecol:
            spalpha = [spalpha] * num_colors
        spidx = splst[idx]
        spnum = spmap[spidx, -1]
        sppidx = spmap[spidx, 0:spnum]
        if singlecol:
            spalpha = spalpha * spval[idx]
            spinv_alpha = 1.0 - spalpha
            for p in range(planes):
                if spalpha == 1.0:
                    image[sppidx, p] = spcol[p]
                else:
                    image[sppidx, p] = numpy.round(
                        spalpha * spcol[p] + spinv_alpha * image[sppidx, p])
            if has_alpha:
                # NOTE(review): this compositing formula mixes 0..1 and 0..255
                # scales (255.0 * 1.0 - ... vs 1.0 - 255.0 * x) and looks
                # suspect -- confirm intended alpha-channel math before relying
                # on 4-plane images.
                image[sppidx, 3] = numpy.round(255.0 * 1.0 -
                    (1.0 - 255.0 * image[sppidx, 3]) *
                    (1.0 - 255.0 * spalpha))
            elif has_almap:
                almap[sppidx] = 1.0 - (1.0 - almap[sppidx]) * spinv_alpha
        else:
            # multiple colors: paint diagonal stripes, cycling the color list
            sppval = spval[idx]
            if not (isinstance(sppval, list) or isinstance(sppval, numpy.ndarray)):
                sppval = [sppval] * num_colors
            elif len(sppval) < num_colors:
                sppval = [sppval[0]] * num_colors
            sppidxx = sppidx % num_cols
            sppidxy = sppidx // num_cols
            float_num = float(num_colors)
            spcidx = numpy.trunc(0.5 + (sppidxx + sppidxy).astype(numpy.float64) *
                (float_num / sp_skip)).astype(numpy.int32) % num_colors
            for cc in range(num_colors):
                spcsel = spcidx == cc
                spcidxxy = sppidxx[spcsel] + sppidxy[spcsel] * num_cols
                spccol = spcol[cc]
                spcalpha = spalpha[cc] * sppval[cc]
                spinv_alpha = 1.0 - spcalpha
                for p in range(planes):
                    if spcalpha == 1.0:
                        image[spcidxxy, p] = spccol[p]
                    else:
                        image[spcidxxy, p] = numpy.round(
                            spcalpha * spccol[p] + spinv_alpha * image[spcidxxy, p])
                if has_alpha:
                    # NOTE(review): same suspect scale mixing as above.
                    image[spcidxxy, 3] = numpy.round(255.0 * 1.0 -
                        (1.0 - 255.0 * image[spcidxxy, 3]) *
                        (1.0 - 255.0 * spcalpha))
                elif has_almap:
                    almap[spcidxxy] = 1.0 - (1.0 - almap[spcidxxy]) * spinv_alpha
    # restore original shapes before returning
    image.shape = im_shape
    if has_almap:
        almap.shape = am_shape
    return image
# column period
def column_period(c:numpy.ndarray, thresh:int=0):
    """
    Guess the periodicity of a column of (image) data.

    Heuristic: compute the lagged autocorrelation of the column, then the
    autocorrelation of that autocorrelation, and pick the smallest strongly
    correlated lag; the tail reduces candidate lags with Euclid's algorithm.

    Parameters
    ----------
    c : ndarray
        Column of data (e.g. pixel values)
    thresh : int
        Optional minimum lag; smaller candidate lags are discarded (default: 0)

    Returns
    -------
    p : int (or float)
        Guessed periodicity (float only when the refinement branch averages
        over near-multiples of the base lag)
    """
    # lag-k autocorrelation of the column, for lags 1 .. len(c)//2 - 1
    cc = numpy.zeros(c.size//2)
    for ck in range(1, cc.size):
        cc[ck] = numpy.corrcoef(c[:-ck],c[ck:])[0,1]
    cc[numpy.isnan(cc)] = 0.0
    # second-order autocorrelation (of the autocorrelation sequence)
    ccc = numpy.zeros(cc.size//2)
    for ck in range(3, ccc.size):
        ccc[ck-1] = numpy.corrcoef(cc[1:-ck], cc[ck:-1])[0,1]
    ccc[numpy.isnan(ccc)] = -1.0
    # candidate lags: those within a tolerance (factor 0.816) of the median
    # of the three strongest correlations, in ascending lag order
    ccs = numpy.argsort(-ccc)
    ccsv = numpy.median(ccc[ccs[0:3]]) * 0.816
    ccsl = numpy.sort(ccs[ccc[ccs]>=ccsv])
    # drop candidates below the requested minimum lag
    while thresh > 0 and len(ccsl) > 1 and ccsl[0] < thresh:
        ccsl = ccsl[1:]
    if len(ccsl) == 1:
        return ccsl[0]
    # drop an implausibly small leading lag (less than a third of the next)
    while len(ccsl) > 3 and ccsl[0] < ccsl[1] // 3:
        ccsl = ccsl[1:]
    ccsy = ccsl[-1]
    ccsx = ccsl[0]
    ccsr = ccsy % ccsx
    if ccsr == 0:
        return ccsx
    if ccsx - ccsr < (ccsx // 4):
        ccsr = ccsx - ccsr
    # if the largest lag is nearly a multiple of the smallest, refine the
    # base period to a fractional value from all near-multiples
    if ccsr < (ccsx // 4) and ccsx >= 6 and len(ccsl) > 3:
        ccst = ccsl.astype(numpy.float64) / float(ccsx)
        ccsi = numpy.trunc(ccst + 0.5)
        ccsd = float(ccsx) * (ccst - ccsi)
        ccsx = float(ccsx) + numpy.sum(ccsd) / numpy.sum(ccsi)
        return ccsx
    # otherwise fall back to the greatest common divisor (Euclid's algorithm)
    while ccsy % ccsx != 0:
        (ccsy, ccsx) = (ccsx, ccsy % ccsx)
    return ccsx
# display image
def display_image(
    image_data:Union[bytes, str, numpy.ndarray],
    image_shape:Tuple = None,
    max_size:int = ISIC_IMAGE_DISPLAY_SIZE_MAX,
    library:str = 'matplotlib',
    ipython_as_object:bool = False,
    mpl_axes:object = None,
    **kwargs,
    ) -> Optional[object]:
    """
    Display image in a Jupyter notebook; supports filenames, bytes, arrays

    Parameters
    ----------
    image_data : bytes, str, ndarray/imageio Array
        Image specification (file data, filename, or image array)
    image_shape : tuple
        Image shape (necessary if flattened array!)
    max_size : int
        Desired maximum output size on screen (32..5120, else reset to
        ISIC_IMAGE_DISPLAY_SIZE_MAX)
    library : str
        Either 'matplotlib' (default) or 'ipython'
    ipython_as_object : bool
        With library='ipython', return the ipywidgets Image instead of
        displaying it
    mpl_axes : object
        Optional existing matplotlib axes object to draw into
    **kwargs
        'figsize' is forwarded to matplotlib.pyplot.figure

    Returns
    -------
    Optional[object]
        The ipywidgets Image when library='ipython' and
        ipython_as_object=True; otherwise None
    """
    # IMPORT DONE HERE TO SAVE TIME AT MODULE INIT
    import imageio
    # check inputs
    if image_data is None:
        return
    if not isinstance(library, str):
        raise ValueError('Invalid library selection.')
    library = library.lower()
    if not library in ['ipython', 'matplotlib']:
        raise ValueError('Invalid library selection.')
    # normalize image_data to what the chosen library consumes:
    # ipython wants encoded bytes, matplotlib wants an array
    if (isinstance(image_data, numpy.ndarray) or
        isinstance(image_data, imageio.core.util.Array)):
        if library == 'ipython':
            try:
                image_data = write_image(image_data, 'buffer', 'jpg')
            except:
                raise
    elif isinstance(image_data, str) and (len(image_data) < 256):
        # short strings are treated as filenames
        try:
            with open(image_data, 'rb') as image_file:
                image_data = image_file.read()
        except:
            raise
    if library == 'matplotlib' and isinstance(image_data, bytes):
        try:
            image_data = imageio.imread(image_data)
        except:
            raise
    if not isinstance(max_size, int) or (max_size < 32) or (max_size > 5120):
        max_size = ISIC_IMAGE_DISPLAY_SIZE_MAX
    if image_shape is None:
        try:
            if library == 'ipython':
                image_array = imageio.imread(image_data)
                image_shape = image_array.shape
            else:
                image_shape = image_data.shape
        except:
            raise
    # scale down to fit max_size while keeping the aspect ratio
    image_height = image_shape[0]
    image_width = image_shape[1]
    image_max_xy = max(image_width, image_height)
    shrink_factor = max(1.0, image_max_xy / max_size)
    image_width = int(image_width / shrink_factor)
    image_height = int(image_height / shrink_factor)
    # depending on library call appropriate function
    if library == 'ipython':
        # IMPORT DONE HERE TO SAVE TIME BETWEEN LIBRARY CHOICES
        from ipywidgets import Image as ipy_Image
        from IPython.display import display as ipy_display
        try:
            image_out = ipy_Image(value=image_data,
                width=image_width, height=image_height)
            if not ipython_as_object:
                ipy_display(image_out)
                return None
            return image_out
        except Exception as e:
            warnings.warn('Problem producing image for display: ' + str(e))
            return None
    else:
        # IMPORT DONE HERE TO SAVE TIME BETWEEN LIBRARY CHOICES
        import matplotlib
        import matplotlib.pyplot as mpl_pyplot
        try:
            # convert pixels to inches for matplotlib's figsize
            display_width = image_width / ISIC_FUNC_PPI
            display_height = image_height / ISIC_FUNC_PPI
            if mpl_axes is None:
                if 'figsize' in kwargs:
                    mpl_pyplot.figure(figsize=kwargs['figsize'])
                else:
                    mpl_pyplot.figure(figsize=(display_width, display_height))
                ax_img = mpl_pyplot.imshow(image_data,
                    interpolation='hanning')
                ax_img.axes.set_axis_off()
                mpl_pyplot.show()
            else:
                mpl_axes.imshow(image_data)
        except Exception as e:
            warnings.warn('Problem producing image for display: ' + str(e))
            return None
# image center ([y,x coord] * 0.5)
def image_center(image:numpy.ndarray) -> numpy.ndarray:
    """
    Return the (y, x) center coordinate of an image as 0.5 * shape.

    Parameters
    ----------
    image : ndarray
        Image array; only the first two shape elements are used

    Returns
    -------
    center : ndarray
        Two-element float64 array, [height/2, width/2]
    """
    # the previous try/except that only re-raised was a no-op and was removed
    imsh = image.shape
    return 0.5 * numpy.asarray([imsh[0], imsh[1]]).astype(numpy.float64)
# image composition (from other images)
def image_compose(
    imlist:list,
    outsize:Tuple,
    bgcolor:list = [255,255,255],
    ) -> numpy.ndarray:
    """
    Compose image from parts

    Parameters
    ----------
    imlist : list
        List of image parts, each element a list with at least
        [image (ndarray), x (int), y (int)] and optionally
        [..., alpha, resample_a, resample_b, kernel], where alpha is a
        float or per-pixel ndarray and resample_a/resample_b are passed
        to Sampler.sample_grid (presumably a target size or scaling
        factor -- confirm against .sampler) with the named kernel
        (default 'cubic')
    outsize : Tuple
        Size of output image as (width, height)
    bgcolor : list
        3-element list, default: [255, 255, 255] (white)

    Returns
    -------
    out_image : ndarray
        Output uint8 image of shape (height, width, 3) composed of the
        input images
    """
    if not isinstance(outsize, tuple) and not isinstance(outsize, list):
        raise ValueError('Invalid outsize parameter.')
    # BUGFIX: the pixel-count guard formerly read outsize[0] * outsize[2],
    # which raised IndexError for every 2-element outsize that passed the
    # length check; it must use outsize[1].
    if (len(outsize) != 2 or not isinstance(outsize[0], int) or
        not isinstance(outsize[1], int) or outsize[0] < 1 or
        outsize[1] < 1 or (outsize[0] * outsize[1] > 16777216)):
        raise ValueError('Invalid image dimensions in outsize parameter.')
    # generate output (rows = height = outsize[1])
    out = numpy.zeros(3 * outsize[0] * outsize[1], dtype=numpy.uint8).reshape(
        (outsize[1], outsize[0], 3,))
    im_shape = out.shape
    # set background color (each channel best-effort, bad values ignored)
    if (isinstance(bgcolor, tuple) or isinstance(bgcolor, list)) and len(bgcolor) == 3:
        try:
            out[:,:,0] = bgcolor[0]
        except:
            pass
        try:
            out[:,:,1] = bgcolor[1]
        except:
            pass
        try:
            out[:,:,2] = bgcolor[2]
        except:
            pass
    # iterate over particles
    for ii in imlist:
        # if not a minimally formatted list
        if not isinstance(ii, list) or len(ii) < 3:
            continue
        # get image and input shape, check dims
        ii_image = ii[0]
        ii_shape = ii_image.shape
        if len(ii_shape) < 2 or len(ii_shape) > 3:
            continue
        elif len(ii_shape) == 3 and not ii_shape[2] in [1, 3]:
            continue
        # get target position (top left)
        ii_x = ii[1]
        ii_y = ii[2]
        if ii_x >= im_shape[1] or ii_y >= im_shape[0]:
            continue
        # and process alpha
        if len(ii) == 3:
            ii_alpha = 1.0
        else:
            ii_alpha = ii[3]
        if not (isinstance(ii_alpha, float) or isinstance(ii_alpha, numpy.ndarray)):
            continue
        if isinstance(ii_alpha, float):
            if ii_alpha <= 0.0:
                continue
            if ii_alpha > 1.0:
                ii_alpha = 1.0
        else:
            if ii_alpha.ndim != 2:
                continue
            # NOTE(review): this compares the alpha map against the OUTPUT
            # shape while the check after resampling compares it against the
            # particle shape -- confirm which is intended.
            if ii_alpha.shape[0] != im_shape[0] or ii_alpha.shape[1] != im_shape[1]:
                continue
            # clamps the caller's array in place
            ii_alpha[ii_alpha < 0.0] = 0.0
            ii_alpha[ii_alpha > 1.0] = 1.0
        # resizing of image
        if len(ii) > 5 and ((isinstance(ii[4], int) and isinstance(ii[5], int)) or
            (isinstance(ii[4], float) and isinstance(ii[5], float))):
            from .sampler import Sampler
            s = Sampler()
            if isinstance(ii_alpha, numpy.ndarray):
                ii_alpha = s.sample_grid(ii_alpha, ii[4:6], 'linear')
            if len(ii) > 6 and isinstance(ii[6], str):
                ikern = ii[6]
            else:
                ikern = 'cubic'
            ii_image = s.sample_grid(ii_image, ii[4:6], ikern)
            # BUGFIX: update the PARTICLE's shape after resampling; the old
            # code overwrote im_shape (the canvas shape used for clamping
            # below), corrupting bounds for this and all later particles.
            ii_shape = ii_image.shape
        # check arguments for compatibility
        if not (isinstance(ii_image, numpy.ndarray) and
            isinstance(ii_x, int) and isinstance(ii_y, int) and
            (isinstance(ii_alpha, float) or (
            isinstance(ii_alpha, numpy.ndarray) and
            ii_alpha.ndim == 2 and ii_alpha.shape[0] == ii_image.shape[0]))):
            continue
        # clip the source and target rectangles to the canvas
        sfrom_x = 0
        sfrom_y = 0
        sto_x = ii_shape[1]
        sto_y = ii_shape[0]
        tfrom_x = ii_x
        tfrom_y = ii_y
        if tfrom_x < 0:
            sfrom_x -= tfrom_x
            tfrom_x = 0
        if tfrom_y < 0:
            sfrom_y -= tfrom_y
            tfrom_y = 0
        from_x = sto_x - sfrom_x
        from_y = sto_y - sfrom_y
        if from_x <= 0 or from_y <= 0:
            continue
        tto_x = tfrom_x + from_x
        tto_y = tfrom_y + from_y
        if tto_x > im_shape[1]:
            shrink = tto_x - im_shape[1]
            tto_x -= shrink
            sto_x -= shrink
        if tto_y > im_shape[0]:
            shrink = tto_y - im_shape[0]
            tto_y -= shrink
            sto_y -= shrink
        if tto_x <= tfrom_x or tto_y <= tfrom_y:
            continue
        # blend the (cropped) particle into the canvas
        if len(ii_shape) == 2:
            if sfrom_x == 0 and sfrom_y == 0 and sto_x == ii_shape[1] and sto_y == ii_shape[0]:
                out[tfrom_y:tto_y, tfrom_x:tto_x, :] = image_mix(
                    out[tfrom_y:tto_y, tfrom_x:tto_x, :], ii_image, ii_alpha)
            else:
                out[tfrom_y:tto_y, tfrom_x:tto_x, :] = image_mix(
                    out[tfrom_y:tto_y, tfrom_x:tto_x, :],
                    ii_image[sfrom_y:sto_y, sfrom_x:sto_x], ii_alpha)
        else:
            if sfrom_x == 0 and sfrom_y == 0 and sto_x == ii_shape[1] and sto_y == ii_shape[0]:
                out[tfrom_y:tto_y, tfrom_x:tto_x, :] = image_mix(
                    out[tfrom_y:tto_y, tfrom_x:tto_x, :], ii_image, ii_alpha)
            else:
                out[tfrom_y:tto_y, tfrom_x:tto_x, :] = image_mix(
                    out[tfrom_y:tto_y, tfrom_x:tto_x, :],
                    ii_image[sfrom_y:sto_y, sfrom_x:sto_x, :], ii_alpha)
    return out
# image correlation (pixel values)
def image_corr(
    im1:numpy.ndarray,
    im2:numpy.ndarray,
    immask:numpy.ndarray = None,
    ) -> float:
    """
    Correlate pixel values for two images

    Parameters
    ----------
    im1, im2 : ndarray
        Image arrays (of same size!)
    immask : ndarray
        Optional masking array (in which case only over those pixels);
        resampled to im1's grid if the pixel counts differ

    Returns
    -------
    ic : float
        Correlation coefficient

    Raises
    ------
    ValueError
        If im1 and im2 differ in total number of elements
    """
    if im1.size != im2.size:
        raise ValueError('Images must match in size.')
    if immask is None:
        cc = numpy.corrcoef(im1.reshape(im1.size), im2.reshape(im2.size))
    else:
        if immask.size != im1.size:
            # bring the mask onto im1's grid, then re-binarize at 50%
            immask = image_resample(numpy.uint8(255) * immask.astype(numpy.uint8),
                (im1.shape[0], im1.shape[1])) >= 128
        # BUGFIX: numpy.bool (alias of builtin bool) was removed in
        # NumPy 1.24; numpy.bool_ is the stable dtype spelling.
        if immask.dtype != numpy.bool_:
            immask = (immask > 0)
        cc = numpy.corrcoef(im1[immask], im2[immask])
    return cc[0,1]
# crop image
def image_crop(
image:numpy.ndarray,
cropping:Any,
padding:int = 0,
masking:str = None,
spmap:numpy.ndarray = None,
spnei:List = None,
spnei_degree:int = 1,
) -> numpy.ndarray:
"""
Crops an image to a rectangular region of interest.
Parameters
----------
image : ndarray
Image (2D or 2D-3) array
cropping : Any
Cropping selection, either of
- [y0, x0, y1, x1] rectangle (y1/x1 non inclusive)
- int(S), superpixel index, requires spmap!
padding : int
Additional padding around cropping in pixels
masking : str
Masking operation, if requested, either of
'smoothnei' - smooth the neighboring region
spmap : ndarray
Superpixel mapping array
spnei : list
Superpixel (list of) list(s) of neighbors
spnei_degree : int
How many degrees of neighbors to include (default: 1)
"""
im_shape = image.shape
if not isinstance(padding, int) or padding < 0:
padding = 0
if isinstance(cropping, list) and len(cropping) == 4:
y0 = max(0, cropping[0]-padding)
x0 = max(0, cropping[1]-padding)
y1 = min(im_shape[0], cropping[2]+padding)
x1 = min(im_shape[1], cropping[2]+padding)
elif isinstance(cropping, int) and cropping >= 0:
if spmap is None or not isinstance(spmap, numpy.ndarray):
raise ValueError('Missing spmap parameter.')
spidx = cropping
sppix = spmap[spidx,:spmap[spidx,-1]]
sppiy = sppix // im_shape[1]
sppix = sppix % im_shape[1]
y0 = max(0, numpy.amin(sppiy)-padding)
x0 = max(0, numpy.amin(sppix)-padding)
y1 = min(im_shape[0], numpy.amax(sppiy)+padding)
x1 = min(im_shape[1], numpy.amax(sppix)+padding)
yd = y1 - y0
xd = x1 - x0
dd = (yd + xd) // 2
if isinstance(spnei, list):
if len(spnei) > 8:
spnei = [spnei]
if not isinstance(spnei_degree, int) or spnei_degree < 1:
spnei_degree = 0
elif spnei_degree > len(spnei):
spnei_degree = len(spnei) - 1
else:
spnei_degree -= 1
spnei = spnei[spnei_degree]
try:
nei = spnei[spidx]
for n in nei:
sppix = spmap[n,:spmap[n,-1]]
sppiy = sppix // im_shape[1]
sppix = sppix % im_shape[1]
y0 = min(y0, max(0, numpy.amin(sppiy)-padding))
x0 = min(x0, max(0, numpy.amin(sppix)-padding))
y1 = max(y1, min(im_shape[0], numpy.amax(sppiy)+padding))
x1 = max(x1, min(im_shape[1], numpy.amax(sppix)+padding))
except:
raise
if isinstance(masking, str) and masking == 'smoothnei':
from .sampler import Sampler
s = Sampler()
yd = y1 - y0
xd = x1 - x0
try:
if len(im_shape) > 2:
ci = image[y0:y1,x0:x1,:]
else:
ci = image[y0:y1,x0:x1]
cim = numpy.zeros(yd * xd).reshape((yd,xd,))
cim[yd//2, xd//2] = 1.0
cims = s.sample_grid(cim, 1.0, 'gauss' + str(dd))
cims /= | numpy.amax(cims) | numpy.amax |
import unittest
import numpy as np
from sklearn.datasets import (
load_breast_cancer,
load_iris
)
from msitrees._core import (
gini_impurity,
gini_information_gain,
entropy,
get_class_and_proba,
classif_best_split
)
class TestGiniImpurity(unittest.TestCase):
def test_input_type_list(self):
try:
gini_impurity([0, 0])
except TypeError:
self.fail('Exception on allowed input type - list')
def test_input_type_tuple(self):
try:
gini_impurity((0, 0))
except TypeError:
self.fail('Exception on allowed input type - tuple')
def test_input_type_numpy(self):
try:
gini_impurity(np.array([0, 0]))
except TypeError:
self.fail('Exception on allowed input type - np.ndarray')
def test_input_int(self):
with self.assertRaises(ValueError):
gini_impurity(0)
def test_input_other(self):
with self.assertRaises(TypeError):
gini_impurity('foo')
with self.assertRaises(TypeError):
gini_impurity({'foo': 1})
def test_input_wrong_shape(self):
with self.assertRaises(ValueError):
gini_impurity( | np.array([[1, 0], [1, 0]]) | numpy.array |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Dict, Any
import soundfile as sf
import librosa
import numpy as np
import argparse
import yaml
import json
import jsonlines
import concurrent.futures
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from pathlib import Path
import tqdm
from operator import itemgetter
from praatio import tgio
import logging
from config import get_cfg_default
def logmelfilterbank(audio,
sr,
n_fft=1024,
hop_length=256,
win_length=None,
window="hann",
n_mels=80,
fmin=None,
fmax=None,
eps=1e-10):
"""Compute log-Mel filterbank feature.
Parameters
----------
audio : ndarray
Audio signal (T,).
sr : int
Sampling rate.
n_fft : int
FFT size. (Default value = 1024)
hop_length : int
Hop size. (Default value = 256)
win_length : int
Window length. If set to None, it will be the same as fft_size. (Default value = None)
window : str
Window function type. (Default value = "hann")
n_mels : int
Number of mel basis. (Default value = 80)
fmin : int
Minimum frequency in mel basis calculation. (Default value = None)
fmax : int
Maximum frequency in mel basis calculation. (Default value = None)
eps : float
Epsilon value to avoid inf in log calculation. (Default value = 1e-10)
Returns
-------
np.ndarray
Log Mel filterbank feature (#frames, num_mels).
"""
# get amplitude spectrogram
x_stft = librosa.stft(
audio,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
pad_mode="reflect")
spc = np.abs(x_stft) # (#bins, #frames,)
# get mel basis
fmin = 0 if fmin is None else fmin
fmax = sr / 2 if fmax is None else fmax
mel_basis = librosa.filters.mel(sr, n_fft, n_mels, fmin, fmax)
return np.log10(np.maximum(eps, | np.dot(mel_basis, spc) | numpy.dot |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LSTM layer."""
# pylint: disable=g-direct-tensorflow-import
import copy
import os
import shutil
from absl.testing import parameterized
import keras
from keras.layers.rnn import gru_lstm_utils
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.utils import np_utils
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import test_util as tf_test_util
# Global config for grappler setting that is used for graph mode test.
_rewrites = rewriter_config_pb2.RewriterConfig()
_rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
_rewrites.min_graph_nodes = -1
_graph_options = tf.compat.v1.GraphOptions(rewrite_options=_rewrites)
_config = tf.compat.v1.ConfigProto(graph_options=_graph_options)
@test_combinations.run_all_keras_modes(config=_config)
class LSTMGraphRewriteTest(test_combinations.TestCase):
input_shape = 10
output_shape = 8
rnn_state_size = 8
timestep = 4
batch = 100
epoch = 1
@parameterized.named_parameters(
('non_tan_activation', 'relu', 'sigmoid', 0, False, True),
('non_sigmoid_recur_activation', 'tanh', 'relu', 0, False, True),
('use_recurrent_dropout', 'tanh', 'sigmoid', 0.1, False, True),
('unroll', 'tanh', 'sigmoid', 0, True, True),
('not_use_bias', 'tanh', 'sigmoid', 0, False, False),
)
@test_utils.run_v2_only
def test_could_use_defun_backend(self, activation, recurrent_activation,
recurrent_dropout, unroll, use_bias):
layer = keras.layers.LSTM(
1,
activation=activation,
recurrent_activation=recurrent_activation,
recurrent_dropout=recurrent_dropout,
unroll=unroll,
use_bias=use_bias)
self.assertFalse(layer._could_use_gpu_kernel)
@test_utils.run_v2_only
def test_use_on_default_activation_with_gpu_kernel(self):
layer = keras.layers.LSTM(1, activation=tf.tanh)
self.assertTrue(layer._could_use_gpu_kernel)
layer = keras.layers.LSTM(1, recurrent_activation=tf.sigmoid)
self.assertTrue(layer._could_use_gpu_kernel)
  def test_static_shape_inference_LSTM(self):
    """With return_sequences=True the static time dimension is preserved.

    Regression test for Github issue 15165.
    """
    timesteps = 3
    embedding_dim = 4
    units = 2
    model = keras.models.Sequential()
    inputs = keras.layers.Dense(
        embedding_dim, input_shape=(timesteps, embedding_dim))
    model.add(inputs)
    layer = keras.layers.LSTM(units, return_sequences=True)
    model.add(layer)
    outputs = model.layers[-1].output
    # Batch dim is unknown (None); time and feature dims must be static.
    self.assertEqual(outputs.shape.as_list(), [None, timesteps, units])
  def test_dynamic_behavior_LSTM(self):
    """The layer accepts inputs whose time dimension is unknown (None)."""
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    # input_shape=(None, ...) leaves the number of timesteps undefined.
    layer = keras.layers.LSTM(units, input_shape=(None, embedding_dim))
    model = keras.models.Sequential()
    model.add(layer)
    model.compile(tf.compat.v1.train.GradientDescentOptimizer(0.001), 'mse')
    x = np.random.random((num_samples, timesteps, embedding_dim))
    y = np.random.random((num_samples, units))
    model.train_on_batch(x, y)
  def test_stacking_LSTM(self):
    """Two stacked LSTMs (both returning sequences) train end to end."""
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    # Normalize so each target row sums to 1, as categorical
    # cross-entropy expects a probability distribution.
    targets /= targets.sum(axis=-1, keepdims=True)
    model = keras.models.Sequential()
    model.add(keras.layers.LSTM(10, return_sequences=True, unroll=False))
    model.add(keras.layers.LSTM(5, return_sequences=True, unroll=False))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
layer_class = keras.layers.LSTM
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
  def test_specify_initial_state_keras_tensor(self):
    """Initial state given as Keras tensors must be wired into the graph."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    # Test with Keras tensor
    inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [keras.Input((units,)) for _ in range(num_states)]
    layer = keras.layers.LSTM(units)
    if len(initial_state) == 1:
      output = layer(inputs, initial_state=initial_state[0])
    else:
      output = layer(inputs, initial_state=initial_state)
    # The state tensors must appear among the layer node's input tensors,
    # i.e. be part of the functional graph rather than silently captured.
    self.assertTrue(
        any(initial_state[0] is t
            for t in layer._inbound_nodes[0].input_tensors))
    model = keras.models.Model([inputs] + initial_state, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_state = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.train_on_batch([inputs] + initial_state, targets)
  def test_specify_initial_state_non_keras_tensor(self):
    """Initial state may also be plain backend variables (non-Keras tensors);
    they are captured rather than added as model inputs."""
    num_states = 2
    timesteps = 3
    embedding_dim = 4
    units = 3
    num_samples = 2
    # Test with non-Keras tensor
    inputs = keras.Input((timesteps, embedding_dim))
    initial_state = [
        keras.backend.random_normal_variable((num_samples, units), 0, 1)
        for _ in range(num_states)
    ]
    layer = keras.layers.LSTM(units)
    output = layer(inputs, initial_state=initial_state)
    # Note the model takes only `inputs`; the state variables are captured.
    model = keras.models.Model(inputs, output)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    targets = np.random.random((num_samples, units))
    model.train_on_batch(inputs, targets)
def test_reset_states_with_values(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
layer = keras.layers.LSTM(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
initial_weight_count = len(layer.weights)
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.zeros(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [keras.backend.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
self.assertAllClose(
keras.backend.eval(layer.states[0]),
np.ones(keras.backend.int_shape(layer.states[0])),
atol=1e-4)
# Test with invalid data
with self.assertRaises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
self.assertEqual(initial_weight_count, len(layer.weights))
# Variables in "states" shouldn't show up in .weights
layer.states = tf.nest.map_structure(tf.Variable, values)
layer.reset_states()
self.assertEqual(initial_weight_count, len(layer.weights))
def test_specify_state_with_masking(self):
num_states = 2
timesteps = 3
embedding_dim = 4
units = 3
num_samples = 2
inputs = keras.Input((timesteps, embedding_dim))
_ = keras.layers.Masking()(inputs)
initial_state = [keras.Input((units,)) for _ in range(num_states)]
output = keras.layers.LSTM(units)(
inputs, initial_state=initial_state)
model = keras.models.Model([inputs] + initial_state, output)
model.compile(
loss='categorical_crossentropy',
optimizer=tf.compat.v1.train.GradientDescentOptimizer(0.01))
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [
np.random.random((num_samples, units)) for _ in range(num_states)
]
targets = | np.random.random((num_samples, units)) | numpy.random.random |
import roslib
import sys
import rospy
import cv2
import math
import imutils
import statistics
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image
from std_msgs.msg import Float64MultiArray, Float64
from cv_bridge import CvBridge, CvBridgeError
from scipy.spatial import distance as dist
class image_converter:
# Defines publisher and subscriber
    def __init__(self):
        """Initialize the ROS node, all publishers/subscribers and the
        cached state used as a fallback when blob detection fails."""
        # initialize the node named image_processing
        rospy.init_node('image_processing', anonymous=True)
        # publishers that forward the raw camera frames
        self.image_pub1 = rospy.Publisher("image_topic1", Image, queue_size=1)
        self.image_pub2 = rospy.Publisher("image_topic2", Image, queue_size=1)
        # publisher for the estimated joint angular positions
        self.joints_pub=rospy.Publisher("joints_pos",Float64MultiArray,queue_size=10)
        # publishers for the end-effector position (vision estimate and FK)
        self.vision_end_effector_pub=rospy.Publisher("vision_end_effector",Float64MultiArray,queue_size=10)
        self.fk_end_effector_pub = rospy.Publisher("fk_end_effector", Float64MultiArray, queue_size=10)
        self.actual_target_trajectory_pub = rospy.Publisher("actual_target_trajectory", Float64MultiArray,queue_size=10)
        self.vision_target_trajectory_pub = rospy.Publisher("vision_target_trajectory", Float64MultiArray,queue_size=10)
        # publishers commanding the four joint position controllers
        self.robot_joint1_pub = rospy.Publisher("/robot/joint1_position_controller/command", Float64, queue_size=10)
        self.robot_joint2_pub = rospy.Publisher("/robot/joint2_position_controller/command", Float64, queue_size=10)
        self.robot_joint3_pub = rospy.Publisher("/robot/joint3_position_controller/command", Float64, queue_size=10)
        self.robot_joint4_pub = rospy.Publisher("/robot/joint4_position_controller/command", Float64, queue_size=10)
        # publishers commanding the target's position controllers
        self.target_x_pub = rospy.Publisher("/target/x_position_controller/command", Float64, queue_size=10)
        self.target_y_pub = rospy.Publisher("/target/y_position_controller/command", Float64, queue_size=10)
        self.target_z_pub = rospy.Publisher("/target/z_position_controller/command", Float64, queue_size=10)
        # subscribers receiving frames from the two cameras
        self.image_sub1 = rospy.Subscriber("/camera1/robot/image_raw", Image, self.callback1)
        self.image_sub2 = rospy.Subscriber("/camera2/robot/image_raw", Image, self.callback2)
        # reference time for the desired trajectories
        self.time_trajectory = rospy.get_time()
        # last successfully detected blob positions / scale (fallback cache)
        self.red = np.array([0.0, 0.0, 0.0, 0.0], dtype='float64')
        self.green = np.array([0.0, 0.0, 0.0, 0.0], dtype='float64')
        self.p2m = np.array([0.0], dtype='float64')
        self.joint1 = np.array([0.0], dtype='float64')
        self.joint2 = np.array([0.0], dtype='float64')
        self.joint3 = np.array([0.0], dtype='float64')
        self.joint4 = np.array([0.0], dtype='float64')
        # timestamps of the previous control steps
        self.time_previous_step = np.array([rospy.get_time()], dtype='float64')
        self.time_previous_step2 = np.array([rospy.get_time()], dtype='float64')
        # error and derivative of error for trajectory tracking
        self.error = np.array([0.0, 0.0,0.0], dtype='float64')
        self.error_d = np.array([0.0, 0.0,0.0], dtype='float64')
        # bridge between openCV images and ROS image messages
        self.bridge = CvBridge()
# Recieve data from camera 1, process it, and publish
def callback1(self, data):
# Recieve the image
try:
self.image1 = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
def callback2(self, data):
# Recieve the image
try:
self.image2 = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
#Blob detection starts here-------------------------------------------------------
#Same to 2_1_joint_estimation.py
def detect_red(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([0, 200, 0])
higher_red1 = np.array([0, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([0, 200, 0])
higher_red2 = np.array([0, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_blue(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([70, 0, 0])
higher_red1 = np.array([255, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([70, 0, 0])
higher_red2 = np.array([255, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_green(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([55, 0, 0])
higher_red1 = np.array([100, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([55, 0, 0])
higher_red2 = np.array([100, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_yellow(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([16, 244, 0])
higher_red1 = np.array([51, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([16, 244, 0])
higher_red2 = np.array([51, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_blue_contours(image1):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([70, 0, 0])
higher_red1 = np.array([255, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
return np.array([contours1])
def detect_yellow_contours(image1):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([16, 244, 0])
higher_red1 = np.array([51, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy,cz1 = (int(x1), int(y1))
return np.array([contours1])
def get_y1_y2(yellow_contours, blue_contours):
y1 = np.min(yellow_contours, axis = 0)
y1 = y1[0][1]
y1 = y1[:,1]
y2 = np.max(blue_contours, axis = 0)
y2 = y2[0][1]
y2 = y2[:,1]
return y1, y2
def pixelTometer(self, image1,image2):
yellow_contours = detect_yellow_contours(image2)
blue_contours = detect_blue_contours(image2)
y2 = detect_blue(self, image1, image2)
y2 = y2[3]
y1, y2 = get_y1_y2(yellow_contours, blue_contours)
p2m = 2.5/(y1 - y2)
#65 is the best number
return p2m
#----------------------------------------------------------------------------------------------
#Angle Detection starts here
#This part is same as 2_1_joint_estimation.py
    def detect_angles_blob(self,image1,image2):
        """Estimate the four joint angles from the two camera views.

        Blob centres (in pixels) are scaled to metres with the
        pixel-to-metre factor and the angles are recovered with arctan2
        between successive joint blobs.

        NOTE(review): pixelTometer/detect_* are called as bare names; inside
        a class those resolve only if module-level functions with the same
        names exist — confirm how this file is actually executed.
        """
        # Cache the latest good measurements so a failed detection (e.g. an
        # occluded blob) falls back to the previously stored value.
        try:
            p=pixelTometer(self,image1,image2)
            self.p2m = p
        except Exception as e:
            p = self.p2m
        try:
            green = detect_green(self, image1, image2)
            self.green = green
        except Exception as e:
            green = self.green
        try:
            red = detect_red(self, image1, image2)
            self.red = red
        except Exception as e:
            red = self.red
        p=pixelTometer(self,image1,image2)
        yellow=p*detect_yellow(self,image1,image2)
        blue=p*detect_blue(self,image1,image2)
        ja1=0.0  # joint 1 is reported as fixed by this estimator
        ja2=np.pi/2-np.arctan2((blue[2] - green[2]), (blue[1] - green[1]))
        ja3 = np.arctan2((blue[3] - green[3]), (blue[0] - green[0]))-np.pi/2
        ja4 = np.arctan2((green[2] - red[2]), -(green[1] - red[1]))-np.pi/2-ja2
        return np.array([ja1,ja2,ja3,ja4])
def angle_trajectory(self):
curr_time = np.array([rospy.get_time() - self.time_trajectory])
ja1 = 0.1
ja2 = float((np.pi / 2) * np.sin((np.pi / 15) * curr_time))
ja3 = float((np.pi / 2) * np.sin((np.pi / 18) * curr_time))
ja4 = float((np.pi / 2) * np.sin((np.pi / 20) * curr_time))
return np.array([ja1, ja2, ja3, ja4])
def actual_target_position(self):
curr_time = np.array([rospy.get_time() - self.time_trajectory])
x_d = float((2.5 * np.cos(curr_time * np.pi / 15))+0.5)
y_d = float(2.5 * np.sin(curr_time * np.pi / 15))
z_d = float((1 * np.sin(curr_time * np.pi / 15))+7.0)
return np.array([x_d,y_d,z_d])
#FK starts here--------------------------------------------------------------------------------
#This part is same as 3_1_FK.py
    def end_effector_position(self, image1, image2):
        """Estimate the end-effector (red blob) position in metres,
        relative to the yellow base blob.

        NOTE(review): helpers are called as bare names (see
        detect_angles_blob); confirm how this file is executed.
        """
        # Fall back to the cached conversion factor if calibration fails.
        try:
            p=pixelTometer(self,image1,image2)
            self.p2m = p
        except Exception as e:
            p = self.p2m
        yellow_posn = detect_yellow(self,image1, image2)
        red_posn = detect_red(self, image1, image2)
        # Flip the camera-2 vertical coordinate (image origin is top-left;
        # 800 is presumably the image height in pixels — TODO confirm).
        yellow_posn[3] = 800 - yellow_posn[3]
        red_posn[3] = 800 - red_posn[3]
        cx, cy, cz1, cz2 = p * (red_posn - yellow_posn)
        # Only camera 2's vertical estimate (cz2) is used for z.
        ee_posn = np.array([cx, cy, cz2])
        ee_posn = np.round(ee_posn,1)
        return ee_posn
#Calculate the jacobian
def calculate_jacobian(self,image1,image2):
ja1,ja2,ja3,ja4=detect_angles_blob(self,image1,image2)
jacobian=np.array([[3*np.cos(ja1)*np.sin(ja2)*np.cos(ja3)*np.cos(ja4)
+3.5*np.cos(ja1)*np.sin(ja2)*np.cos(ja3)
-3*np.sin(ja1)*np.cos(ja4)*np.sin(ja3)
-3.5*np.sin(ja1)*np.sin(ja3)
+3*np.cos(ja1)*np.cos(ja2)*np.sin(ja4),
3*np.sin(ja1)*np.cos(ja2)*np.cos(ja3)*np.cos(ja4)
+3.5*np.sin(ja1)*np.cos(ja2)*np.cos(ja3)
-3*np.sin(ja1)*np.sin(ja2)*np.sin(ja4),
-3*np.sin(ja1)*np.sin(ja2)*np.sin(ja3)*np.cos(ja4)
-3.5*np.sin(ja1)*np.sin(ja2)*np.sin(ja3)
+3*np.cos(ja1)*np.cos(ja4)*np.cos(ja3)
+3.5*np.cos(ja1)*np.cos(ja3),
-3*np.sin(ja1)*np.sin(ja2)*np.cos(ja3)*np.sin(ja4)
-3*np.cos(ja1)*np.sin(ja4)*np.sin(ja3)
+3*np.sin(ja1)*np.cos(ja2)*np.cos(ja4)
],
[
3*np.sin(ja1)*np.sin(ja2)*np.cos(ja3)*np.cos(ja4)
+3.5*np.sin(ja1)*np.sin(ja2)*np.cos(ja3)
+3*np.cos(ja1)*np.cos(ja4)*np.sin(ja3)
+3.5*np.cos(ja1)*np.sin(ja3)
+3*np.sin(ja1)*np.cos(ja2)*np.sin(ja4),
-3*np.cos(ja1)*np.cos(ja2)*np.cos(ja3)*np.cos(ja4)
-3.5*np.cos(ja1)*np.cos(ja2)*np.cos(ja3)
+3*np.cos(ja1)*np.sin(ja2)*np.sin(ja4),
+3*np.cos(ja1)*np.sin(ja2)*np.sin(ja3)*np.cos(ja4)
+3.5*np.cos(ja1)*np.sin(ja2)*np.sin(ja3)
+3*np.sin(ja1)*np.cos(ja4)*np.cos(ja3)
+3.5*np.sin(ja1)*np.cos(ja3),
+3*np.cos(ja1)*np.sin(ja2)*np.cos(ja3)*np.sin(ja4)
-3*np.sin(ja1)*np.sin(ja4)*np.sin(ja3)
-3*np.cos(ja1)*np.cos(ja2)*np.cos(ja4)
],
[ 0,
-3*np.cos(ja3)*np.cos(ja4)*np.sin(ja2)
-3.5*np.cos(ja3)*np.sin(ja2)
-3*np.sin(ja4)*np.cos(ja2),
-3*np.sin(ja3)*np.cos(ja4)*np.cos(ja2)
-3.5*np.sin(ja3)*np.cos(ja2),
-3*np.cos(ja3)*np.sin(ja4)*np.cos(ja2)
-3*np.cos(ja4)* | np.sin(ja2) | numpy.sin |
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.io
def feature_normalize(samples):
    """Return *samples* centred to zero mean and scaled to unit variance.

    Mean and population standard deviation (ddof=0) are computed per
    feature, i.e. down axis 0.

    :param samples: 2-D array of shape (n_samples, n_features).
    :return: normalized feature array of the same shape.
    """
    centered = samples - np.mean(samples, axis=0)
    return centered / np.std(samples, axis=0, ddof=0)
def get_usv(sample_norm):
    """Return the SVD (U, S, V) of the sample covariance matrix.

    Sigma = (1/m) * X^T X is symmetric positive semi-definite, so its SVD
    doubles as its eigendecomposition; the columns of U are the principal
    directions.
    """
    m = sample_norm.shape[0]
    covariance = (1 / m) * np.matmul(sample_norm.T, sample_norm)
    return scipy.linalg.svd(covariance)
def project_data(samples, U, K):
    """
    Computes the reduced data representation when
    projecting only on to the top "K" eigenvectors
    (the first K columns of U).
    """
    return np.matmul(samples, U[:, 0:K])
def recover_data(Z, U, K):
recovered_sample = | np.matmul(Z,U[:,0:K].T) | numpy.matmul |
import os
import numpy as np
import random
import torch
import torch.utils.data as dataf
import torch.nn as nn
import matplotlib.pyplot as plt
from scipy import io
from sklearn.decomposition import PCA
# setting parameters
DataPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/Houston.mat'
TRPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TRLabel.mat'
TSPath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/Houston/TSLabel.mat'
savepath = '/home/hrl/PycharmProjects/untitled/Hyperspectral/Data/FixedTrainSam/W3-DLSection/HU2013/2DCNN-14.mat'
patchsize = 16  # input spatial size for 2D-CNN
batchsize = 128  # select from [16, 32, 64, 128], the best is 64
EPOCH = 200
LR = 0.001
# load data (MATLAB .mat files: cube + fixed train/test label maps)
Data = io.loadmat(DataPath)
TrLabel = io.loadmat(TRPath)
TsLabel = io.loadmat(TSPath)
Data = Data['Houston']
Data = Data.astype(np.float32)
TrLabel = TrLabel['TRLabel']
TsLabel = TsLabel['TSLabel']
# without dimensionality reduction
# Bug fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
# use the builtin int instead.
pad_width = int(np.floor(patchsize / 2))
# normalization method 2: map each band to zero mean and one std
[m, n, l] = np.shape(Data)
# x2 = np.empty((m+pad_width*2, n+pad_width*2, l), dtype='float32')
for i in range(l):
    mean = np.mean(Data[:, :, i])
    std = np.std(Data[:, :, i])
    Data[:, :, i] = (Data[:, :, i] - mean)/std
    # x2[:, :, i] = np.pad(Data[:, :, i], pad_width, 'symmetric')
# # extract the first principal component
# x = np.reshape(Data, (m*n, l))
# pca = PCA(n_components=0.995, copy=True, whiten=False)
# x = pca.fit_transform(x)
# _, l = x.shape
# x = np.reshape(x, (m, n, l))
# # print x.shape
# # plt.figure()
# # plt.imshow(x)
# # plt.show()
x = Data
# boundary interpolation
temp = x[:, :, 0]
pad_width = int(np.floor(patchsize / 2))  # np.int removed in NumPy >= 1.24
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
import numpy as np
import json
import os
import sys
import cv2
import copy
import paddlex.utils.logging as logging
# fix linspace problem for pycocotools while numpy > 1.17.2:
# keep a handle on the untouched implementation so callers can restore it
# after temporarily monkey-patching np.linspace.
backup_linspace = np.linspace
def fixed_linspace(start,
                   stop,
                   num=50,
                   endpoint=True,
                   retstep=False,
                   dtype=None,
                   axis=0):
    """np.linspace wrapper that coerces `num` to int, since pycocotools
    passes a float which newer numpy versions reject."""
    return backup_linspace(start, stop, int(num), endpoint, retstep, dtype,
                           axis)
def eval_results(results,
                 metric,
                 coco_gt,
                 with_background=True,
                 resolution=None,
                 is_bbox_normalized=False,
                 map_type='11point'):
    """Evaluation for evaluation program results.

    Args:
        results: list of per-batch result dicts; keys may include
            'proposal', 'bbox', 'mask' or 'accum_map'.
        metric: 'COCO' for COCO-style AP, anything else for VOC-style mAP.
        coco_gt: pycocotools COCO ground-truth object.
        with_background: whether class id 0 is reserved for background.
        resolution: mask resolution; required when 'mask' results exist.
        is_bbox_normalized: whether bbox coords are normalized to [0, 1].
        map_type: VOC mAP integration type, e.g. '11point'.

    Returns:
        (ap_stats, eval_details) tuple.
    """
    box_ap_stats = []
    coco_gt_data = copy.deepcopy(coco_gt)
    eval_details = {'gt': copy.deepcopy(coco_gt.dataset)}
    if metric == 'COCO':
        # pycocotools passes a float `num` to np.linspace, which newer numpy
        # rejects; patch it for the duration of the evaluation.  Bug fix:
        # the try/finally guarantees the patch is undone even on the early
        # return of the mask branch (the original leaked the patch there)
        # and on exceptions.
        np.linspace = fixed_linspace
        try:
            if 'proposal' in results[0]:
                # NOTE(review): proposal_eval also takes an output-file
                # argument that is not passed here — confirm the intended
                # call signature against its definition.
                proposal_eval(results, coco_gt_data)
            if 'bbox' in results[0]:
                box_ap_stats, xywh_results = coco_bbox_eval(
                    results,
                    coco_gt_data,
                    with_background,
                    is_bbox_normalized=is_bbox_normalized)
            if 'mask' in results[0]:
                # NOTE(review): if 'mask' appears without 'bbox',
                # `xywh_results` is unbound here — preserved from the
                # original; confirm callers always pair mask with bbox.
                mask_ap_stats, segm_results = mask_eval(results, coco_gt_data,
                                                        resolution)
                ap_stats = [box_ap_stats, mask_ap_stats]
                eval_details['bbox'] = xywh_results
                eval_details['mask'] = segm_results
                return ap_stats, eval_details
        finally:
            np.linspace = backup_linspace
    else:
        if 'accum_map' in results[-1]:
            res = np.mean(results[-1]['accum_map'][0])
            logging.debug('mAP: {:.2f}'.format(res * 100.))
            box_ap_stats.append(res * 100.)
        elif 'bbox' in results[0]:
            box_ap, xywh_results = voc_bbox_eval(
                results,
                coco_gt_data,
                with_background,
                is_bbox_normalized=is_bbox_normalized,
                map_type=map_type)
            box_ap_stats.append(box_ap)
            eval_details['bbox'] = xywh_results
    return box_ap_stats, eval_details
def proposal_eval(results, coco_gt, outputfile, max_dets=(100, 300, 1000)):
    """Evaluate proposal results with the COCO API and dump them to JSON.

    Args:
        results: list of per-batch result dicts containing a 'proposal' key.
        coco_gt: pycocotools COCO ground-truth object.
        outputfile: path of the JSON file to write; must end with '.json'.
        max_dets: COCO evaluation maxDets thresholds.
    """
    assert 'proposal' in results[0]
    # Bug fix: the body referred to an undefined name `outfile`; it now
    # consistently uses the `outputfile` parameter.
    assert outputfile.endswith('.json')
    xywh_results = proposal2out(results)
    assert len(
        xywh_results) > 0, "The number of valid proposal detected is zero.\n \
        Please use reasonable model and check input data."
    with open(outputfile, 'w') as f:
        json.dump(xywh_results, f)
    cocoapi_eval(xywh_results, 'proposal', coco_gt=coco_gt, max_dets=max_dets)
    # flush coco evaluation result
    sys.stdout.flush()
def coco_bbox_eval(results,
                   coco_gt,
                   with_background=True,
                   is_bbox_normalized=False):
    """Run COCO-style bbox evaluation on detection results.

    Args:
        results: list of per-batch result dicts containing a 'bbox' key.
        coco_gt: pycocotools COCO ground-truth object.
        with_background: when True, class id 0 is reserved for background,
            so category ids are shifted by one.
        is_bbox_normalized: whether bbox coords are normalized to [0, 1].

    Returns:
        (map_stats, xywh_results) — or ([0.0], []) when nothing was detected.
    """
    assert 'bbox' in results[0]
    # (The original imported pycocotools.coco.COCO here but never used it;
    # the dead import has been removed.)
    cat_ids = coco_gt.getCatIds()
    # when with_background = True, mapping category to classid, like:
    # background:0, first_class:1, second_class:2, ...
    clsid2catid = dict(
        {i + int(with_background): catid
         for i, catid in enumerate(cat_ids)})
    xywh_results = bbox2out(
        results, clsid2catid, is_bbox_normalized=is_bbox_normalized)
    results = copy.deepcopy(xywh_results)
    if len(xywh_results) == 0:
        logging.warning(
            "The number of valid bbox detected is zero.\n Please use reasonable model and check input data.\n stop eval!"
        )
        return [0.0], results
    map_stats = cocoapi_eval(xywh_results, 'bbox', coco_gt=coco_gt)
    # flush coco evaluation result
    sys.stdout.flush()
    return map_stats, results
def loadRes(coco_obj, anns):
    """
    Load result file and return a result api object.

    Mirrors pycocotools' COCO.loadRes but builds the result object from an
    in-memory annotation list instead of a file.  Note: mutates the dicts
    in `anns` in place (adds 'id', 'area', 'iscrowd', ...).

    :param coco_obj: ground-truth COCO object whose image list is reused
    :param anns (list): annotation dicts (caption, bbox, segmentation or
        keypoint results)
    :return: res (obj) : result api object
    """
    from pycocotools.coco import COCO
    import pycocotools.mask as maskUtils
    import time
    res = COCO()
    res.dataset['images'] = [img for img in coco_obj.dataset['images']]
    tic = time.time()
    assert type(anns) == list, 'results in not an array of objects'
    annsImgIds = [ann['image_id'] for ann in anns]
    # Every result must refer to an image present in the ground truth.
    assert set(annsImgIds) == (set(annsImgIds) & set(coco_obj.getImgIds())), \
        'Results do not correspond to current coco set'
    if 'caption' in anns[0]:
        # Caption results: keep only the images that actually have captions.
        imgIds = set([img['id'] for img in res.dataset['images']]) & set(
            [ann['image_id'] for ann in anns])
        res.dataset['images'] = [
            img for img in res.dataset['images'] if img['id'] in imgIds
        ]
        for id, ann in enumerate(anns):
            ann['id'] = id + 1
    elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
        # Box results: synthesize a rectangular segmentation and an area.
        res.dataset['categories'] = copy.deepcopy(
            coco_obj.dataset['categories'])
        for id, ann in enumerate(anns):
            bb = ann['bbox']
            x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
            if not 'segmentation' in ann:
                ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
            ann['area'] = bb[2] * bb[3]
            ann['id'] = id + 1
            ann['iscrowd'] = 0
    elif 'segmentation' in anns[0]:
        res.dataset['categories'] = copy.deepcopy(
            coco_obj.dataset['categories'])
        for id, ann in enumerate(anns):
            # now only support compressed RLE format as segmentation results
            ann['area'] = maskUtils.area(ann['segmentation'])
            if not 'bbox' in ann:
                ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
            ann['id'] = id + 1
            ann['iscrowd'] = 0
    elif 'keypoints' in anns[0]:
        res.dataset['categories'] = copy.deepcopy(
            coco_obj.dataset['categories'])
        for id, ann in enumerate(anns):
            # Derive area and bbox from the keypoint extent
            # (keypoints are stored as flat [x, y, v, x, y, v, ...]).
            s = ann['keypoints']
            x = s[0::3]
            y = s[1::3]
            x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
            ann['area'] = (x1 - x0) * (y1 - y0)
            ann['id'] = id + 1
            ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
    res.dataset['annotations'] = anns
    res.createIndex()
    return res
def mask_eval(results, coco_gt, resolution, thresh_binarize=0.5):
    """Run COCO-style segmentation evaluation on mask results.

    Args:
        results: list of per-batch result dicts containing a 'mask' key.
        coco_gt: pycocotools COCO ground-truth object.
        resolution: side length of the square mask predictions.
        thresh_binarize: binarization threshold applied to the soft masks.

    Returns:
        (map_stats, segm_results) — or (None, []) when nothing was detected.
    """
    assert 'mask' in results[0]
    # (The original imported pycocotools.coco.COCO here but never used it;
    # the dead import has been removed.)
    clsid2catid = {i + 1: v for i, v in enumerate(coco_gt.getCatIds())}
    segm_results = mask2out(results, clsid2catid, resolution, thresh_binarize)
    results = copy.deepcopy(segm_results)
    if len(segm_results) == 0:
        logging.warning(
            "The number of valid mask detected is zero.\n Please use reasonable model and check input data."
        )
        return None, results
    map_stats = cocoapi_eval(segm_results, 'segm', coco_gt=coco_gt)
    return map_stats, results
def cocoapi_eval(anns,
                 style,
                 coco_gt=None,
                 anno_file=None,
                 max_dets=(100, 300, 1000)):
    """
    Args:
        anns: Evaluation result.
        style: COCOeval style, can be `bbox` , `segm` and `proposal`.
        coco_gt: Whether to load COCOAPI through anno_file,
            eg: coco_gt = COCO(anno_file)
        anno_file: COCO annotations file.
        max_dets: COCO evaluation maxDets.
    """
    # Idiom fix: compare with None via `is`/`is not` rather than ==/!=,
    # which can be hijacked by custom __eq__ implementations.
    assert coco_gt is not None or anno_file is not None
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval
    if coco_gt is None:
        coco_gt = COCO(anno_file)
    logging.debug("Start evaluate...")
    coco_dt = loadRes(coco_gt, anns)
    if style == 'proposal':
        # Proposal evaluation is class-agnostic bbox evaluation.
        coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
        coco_eval.params.useCats = 0
        coco_eval.params.maxDets = list(max_dets)
    else:
        coco_eval = COCOeval(coco_gt, coco_dt, style)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats
def proposal2out(results, is_bbox_normalized=False):
    """Convert raw proposal results to COCO-style xywh records.

    Args:
        results: list of dicts with 'proposal' -> (bboxes, [lengths]) and
            'im_id' -> image id array; bboxes rows are [xmin, ymin, xmax, ymax].
        is_bbox_normalized: whether box coords are normalized to [0, 1].

    Returns:
        list of dicts with image_id, category_id (always 1), bbox, score.
    """
    xywh_res = []
    for t in results:
        bboxes = t['proposal'][0]
        lengths = t['proposal'][1][0]
        im_ids = np.array(t['im_id'][0]).flatten()
        assert len(lengths) == im_ids.size
        # Bug fix: test for None BEFORE touching `.shape`; the original
        # order raised AttributeError whenever bboxes was None.
        if bboxes is None or bboxes.shape == (1, 1):
            continue
        k = 0
        for i in range(len(lengths)):
            num = lengths[i]
            im_id = int(im_ids[i])
            for j in range(num):
                dt = bboxes[k]
                xmin, ymin, xmax, ymax = dt.tolist()
                if is_bbox_normalized:
                    xmin, ymin, xmax, ymax = \
                        clip_bbox([xmin, ymin, xmax, ymax])
                    w = xmax - xmin
                    h = ymax - ymin
                else:
                    # +1 keeps the historical pixel-inclusive convention.
                    w = xmax - xmin + 1
                    h = ymax - ymin + 1
                bbox = [xmin, ymin, w, h]
                coco_res = {
                    'image_id': im_id,
                    'category_id': 1,
                    'bbox': bbox,
                    'score': 1.0
                }
                xywh_res.append(coco_res)
                k += 1
    return xywh_res
def bbox2out(results, clsid2catid, is_bbox_normalized=False):
    """Convert raw detection results to COCO-style xywh records.

    Args:
        results: request a dict, should include: `bbox`, `im_id`,
            if is_bbox_normalized=True, also need `im_shape`; bbox rows are
            [clsid, score, xmin, ymin, xmax, ymax].
        clsid2catid: class id to category id map of COCO2017 dataset.
        is_bbox_normalized: whether or not bbox is normalized.

    Returns:
        list of dicts with image_id, category_id, bbox, score.
    """
    xywh_res = []
    for t in results:
        bboxes = t['bbox'][0]
        lengths = t['bbox'][1][0]
        im_ids = np.array(t['im_id'][0]).flatten()
        # Bug fix: test for None BEFORE touching `.shape`; the original
        # order raised AttributeError whenever bboxes was None.
        if bboxes is None or bboxes.shape == (1, 1):
            continue
        k = 0
        for i in range(len(lengths)):
            num = lengths[i]
            im_id = int(im_ids[i])
            for j in range(num):
                dt = bboxes[k]
                clsid, score, xmin, ymin, xmax, ymax = dt.tolist()
                catid = (clsid2catid[int(clsid)])
                if is_bbox_normalized:
                    xmin, ymin, xmax, ymax = \
                        clip_bbox([xmin, ymin, xmax, ymax])
                    w = xmax - xmin
                    h = ymax - ymin
                    # Scale normalized coords back to image pixels.
                    im_shape = t['im_shape'][0][i].tolist()
                    im_height, im_width = int(im_shape[0]), int(im_shape[1])
                    xmin *= im_width
                    ymin *= im_height
                    w *= im_width
                    h *= im_height
                else:
                    # +1 keeps the historical pixel-inclusive convention.
                    w = xmax - xmin + 1
                    h = ymax - ymin + 1
                bbox = [xmin, ymin, w, h]
                coco_res = {
                    'image_id': im_id,
                    'category_id': catid,
                    'bbox': bbox,
                    'score': score
                }
                xywh_res.append(coco_res)
                k += 1
    return xywh_res
def mask2out(results, clsid2catid, resolution, thresh_binarize=0.5):
    """Convert raw mask results into COCO RLE segmentation records.

    Each predicted soft mask (resolution x resolution) is pasted back into
    its scale-expanded box in the original image, binarized at
    `thresh_binarize`, and RLE-encoded with pycocotools.

    NOTE(review): `bboxes.shape` is checked before `bboxes is None`, so a
    None value would raise AttributeError — same ordering issue as the
    other *2out helpers; left unchanged here.
    """
    import pycocotools.mask as mask_util
    # Expansion factor that maps the 1-px zero border added below back onto
    # the original box extent.
    scale = (resolution + 2.0) / resolution
    segm_res = []
    # for each batch
    for t in results:
        bboxes = t['bbox'][0]
        lengths = t['bbox'][1][0]
        im_ids = np.array(t['im_id'][0])
        if bboxes.shape == (1, 1) or bboxes is None:
            continue
        if len(bboxes.tolist()) == 0:
            continue
        masks = t['mask'][0]
        s = 0
        # for each sample
        for i in range(len(lengths)):
            num = lengths[i]
            im_id = int(im_ids[i][0])
            im_shape = t['im_shape'][0][i]
            # bbox rows are [clsid, score, xmin, ymin, xmax, ymax].
            bbox = bboxes[s:s + num][:, 2:]
            clsid_scores = bboxes[s:s + num][:, 0:2]
            mask = masks[s:s + num]
            s += num
            im_h = int(im_shape[0])
            im_w = int(im_shape[1])
            expand_bbox = expand_boxes(bbox, scale)
            expand_bbox = expand_bbox.astype(np.int32)
            # Mask with a 1-px zero border so resizing produces clean edges.
            padded_mask = np.zeros((resolution + 2, resolution + 2),
                                   dtype=np.float32)
            for j in range(num):
                xmin, ymin, xmax, ymax = expand_bbox[j].tolist()
                clsid, score = clsid_scores[j].tolist()
                clsid = int(clsid)
                padded_mask[1:-1, 1:-1] = mask[j, clsid, :, :]
                catid = clsid2catid[clsid]
                w = xmax - xmin + 1
                h = ymax - ymin + 1
                w = np.maximum(w, 1)
                h = np.maximum(h, 1)
                # Resize the soft mask to the box size, then binarize.
                resized_mask = cv2.resize(padded_mask, (w, h))
                resized_mask = np.array(
                    resized_mask > thresh_binarize, dtype=np.uint8)
                im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
                # Clip the pasted region to the image bounds.
                x0 = min(max(xmin, 0), im_w)
                x1 = min(max(xmax + 1, 0), im_w)
                y0 = min(max(ymin, 0), im_h)
                y1 = min(max(ymax + 1, 0), im_h)
                im_mask[y0:y1, x0:x1] = resized_mask[(y0 - ymin):(y1 - ymin), (
                    x0 - xmin):(x1 - xmin)]
                # RLE-encode (Fortran order required by pycocotools).
                segm = mask_util.encode(
                    np.array(im_mask[:, :, np.newaxis], order='F'))[0]
                catid = clsid2catid[clsid]
                segm['counts'] = segm['counts'].decode('utf8')
                coco_res = {
                    'image_id': im_id,
                    'category_id': catid,
                    'segmentation': segm,
                    'score': score
                }
                segm_res.append(coco_res)
    return segm_res
def expand_boxes(boxes, scale):
    """Return a copy of ``boxes`` (xmin, ymin, xmax, ymax rows) whose
    width and height are multiplied by ``scale`` about each box center.
    """
    half_w = (boxes[:, 2] - boxes[:, 0]) * 0.5 * scale
    half_h = (boxes[:, 3] - boxes[:, 1]) * 0.5 * scale
    center_x = (boxes[:, 2] + boxes[:, 0]) * 0.5
    center_y = (boxes[:, 3] + boxes[:, 1]) * 0.5
    expanded = np.zeros(boxes.shape)
    expanded[:, 0] = center_x - half_w
    expanded[:, 1] = center_y - half_h
    expanded[:, 2] = center_x + half_w
    expanded[:, 3] = center_y + half_h
    return expanded
def voc_bbox_eval(results,
                  coco_gt,
                  with_background=False,
                  overlap_thresh=0.5,
                  map_type='11point',
                  is_bbox_normalized=False,
                  evaluate_difficult=False):
    """
    Bounding box evaluation for VOC dataset
    Args:
        results (list): prediction bounding box results.
        coco_gt: COCO-format ground truth object providing getCatIds().
        with_background (bool): whether class id 0 is reserved for
            background (categories then map to ids 1..N).
        overlap_thresh (float): the postive threshold of
                        bbox overlap
        map_type (string): method for mAP calcualtion,
                        can only be '11point' or 'integral'
        is_bbox_normalized (bool): whether bbox is normalized
                        to range [0, 1].
        evaluate_difficult (bool): whether to evaluate
                        difficult gt bbox.
    Returns:
        tuple: (mAP as a percentage, COCO-style xywh detection records)
    """
    assert 'bbox' in results[0]
    logging.debug("Start evaluate...")
    cat_ids = coco_gt.getCatIds()
    # when with_background = True, mapping category to classid, like:
    #   background:0, first_class:1, second_class:2, ...
    clsid2catid = dict(
        {i + int(with_background): catid
         for i, catid in enumerate(cat_ids)})
    class_num = len(clsid2catid) + int(with_background)
    detection_map = DetectionMAP(
        class_num=class_num,
        overlap_thresh=overlap_thresh,
        map_type=map_type,
        is_bbox_normalized=is_bbox_normalized,
        evaluate_difficult=evaluate_difficult)
    xywh_res = []
    for t in results:
        bboxes = t['bbox'][0]
        bbox_lengths = t['bbox'][1][0]
        im_ids = np.array(t['im_id'][0]).flatten()
        # Test `None` first: reading `.shape` on None would raise
        # AttributeError.  A (1, 1) shape is the "no detections" sentinel.
        if bboxes is None or bboxes.shape == (1, 1):
            continue
        gt_boxes = t['gt_box'][0]
        gt_labels = t['gt_label'][0]
        difficults = t['is_difficult'][0] if not evaluate_difficult \
            else None
        if len(t['gt_box'][1]) == 0:
            # gt_box, gt_label, difficult read as zero padded Tensor
            bbox_idx = 0
            for i in range(len(gt_boxes)):
                gt_box = gt_boxes[i]
                gt_label = gt_labels[i]
                difficult = None if difficults is None \
                    else difficults[i]
                bbox_num = bbox_lengths[i]
                bbox = bboxes[bbox_idx:bbox_idx + bbox_num]
                gt_box, gt_label, difficult = prune_zero_padding(
                    gt_box, gt_label, difficult)
                detection_map.update(bbox, gt_box, gt_label, difficult)
                bbox_idx += bbox_num
                im_id = int(im_ids[i])
                for b in bbox:
                    clsid, score, xmin, ymin, xmax, ymax = b.tolist()
                    # Pixel coordinates are inclusive, hence the +1.
                    w = xmax - xmin + 1
                    h = ymax - ymin + 1
                    # A fresh name is used here: the original rebound
                    # `bbox` while iterating over it, which is confusing.
                    xywh = [xmin, ymin, w, h]
                    coco_res = {
                        'image_id': im_id,
                        'category_id': clsid2catid[int(clsid)],
                        'bbox': xywh,
                        'score': score
                    }
                    xywh_res.append(coco_res)
        else:
            # gt_box, gt_label, difficult read as LoDTensor
            gt_box_lengths = t['gt_box'][1][0]
            bbox_idx = 0
            gt_box_idx = 0
            for i in range(len(bbox_lengths)):
                bbox_num = bbox_lengths[i]
                gt_box_num = gt_box_lengths[i]
                bbox = bboxes[bbox_idx:bbox_idx + bbox_num]
                gt_box = gt_boxes[gt_box_idx:gt_box_idx + gt_box_num]
                gt_label = gt_labels[gt_box_idx:gt_box_idx + gt_box_num]
                difficult = None if difficults is None else \
                    difficults[gt_box_idx: gt_box_idx + gt_box_num]
                detection_map.update(bbox, gt_box, gt_label, difficult)
                bbox_idx += bbox_num
                gt_box_idx += gt_box_num
                im_id = int(im_ids[i])
                for b in bbox:
                    clsid, score, xmin, ymin, xmax, ymax = b.tolist()
                    # Pixel coordinates are inclusive, hence the +1.
                    w = xmax - xmin + 1
                    h = ymax - ymin + 1
                    xywh = [xmin, ymin, w, h]
                    coco_res = {
                        'image_id': im_id,
                        'category_id': clsid2catid[int(clsid)],
                        'bbox': xywh,
                        'score': score
                    }
                    xywh_res.append(coco_res)
    logging.debug("Accumulating evaluation results...")
    detection_map.accumulate()
    map_stat = 100. * detection_map.get_map()
    logging.debug("mAP({:.2f}, {}) = {:.2f}".format(overlap_thresh, map_type,
                                                    map_stat))
    return map_stat, xywh_res
def prune_zero_padding(gt_box, gt_label, difficult=None):
    """Trim trailing zero-padding rows from ground-truth arrays.

    Scans ``gt_box`` from the front and stops at the first box whose
    four coordinates are all zero; only the rows before it are kept.
    """
    n_valid = 0
    for box in gt_box:
        if box[0] == 0 and box[1] == 0 and box[2] == 0 and box[3] == 0:
            break
        n_valid += 1
    pruned_difficult = difficult[:n_valid] if difficult is not None else None
    return gt_box[:n_valid], gt_label[:n_valid], pruned_difficult
def bbox_area(bbox, is_bbox_normalized):
    """Area of an (xmin, ymin, xmax, ymax) box.

    Pixel-coordinate boxes are inclusive, so each side gets +1;
    normalized boxes do not.
    """
    pad = 1. - float(is_bbox_normalized)
    return (bbox[2] - bbox[0] + pad) * (bbox[3] - bbox[1] + pad)
def jaccard_overlap(pred, gt, is_bbox_normalized=False):
    """Intersection-over-union of two (xmin, ymin, xmax, ymax) boxes."""
    # Fast path: the boxes do not intersect at all.
    if (pred[0] >= gt[2] or pred[2] <= gt[0]
            or pred[1] >= gt[3] or pred[3] <= gt[1]):
        return 0.
    inter = [max(pred[0], gt[0]), max(pred[1], gt[1]),
             min(pred[2], gt[2]), min(pred[3], gt[3])]
    inter_area = bbox_area(inter, is_bbox_normalized)
    union = (bbox_area(pred, is_bbox_normalized)
             + bbox_area(gt, is_bbox_normalized) - inter_area)
    return float(inter_area) / union
class DetectionMAP(object):
"""
Calculate detection mean average precision.
Currently support two types: 11point and integral
Args:
class_num (int): the class number.
overlap_thresh (float): The threshold of overlap
ratio between prediction bounding box and
ground truth bounding box for deciding
true/false positive. Default 0.5.
map_type (str): calculation method of mean average
precision, currently support '11point' and
'integral'. Default '11point'.
is_bbox_normalized (bool): whther bounding boxes
is normalized to range[0, 1]. Default False.
evaluate_difficult (bool): whether to evaluate
difficult bounding boxes. Default False.
"""
def __init__(self,
class_num,
overlap_thresh=0.5,
map_type='11point',
is_bbox_normalized=False,
evaluate_difficult=False):
self.class_num = class_num
self.overlap_thresh = overlap_thresh
assert map_type in ['11point', 'integral'], \
"map_type currently only support '11point' "\
"and 'integral'"
self.map_type = map_type
self.is_bbox_normalized = is_bbox_normalized
self.evaluate_difficult = evaluate_difficult
self.reset()
def update(self, bbox, gt_box, gt_label, difficult=None):
"""
Update metric statics from given prediction and ground
truth infomations.
"""
if difficult is None:
difficult = np.zeros_like(gt_label)
# record class gt count
for gtl, diff in zip(gt_label, difficult):
if self.evaluate_difficult or int(diff) == 0:
self.class_gt_counts[int( | np.array(gtl) | numpy.array |
# NOTE(review): removed stray dataset-viewer boilerplate text
# ("Subsets and Splits ...") that was pasted into the source — it was
# not Python and broke the file's syntax; no code was lost here.