| prompt | completion | api |
|---|---|---|
| string (19-879k chars) | string (3-53.8k chars) | string (8-59 chars) |
# Copyright 2019 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Alpha-Rank for general games.
Namely, computes fixation probabilities, Markov chain, and associated
stationary distribution given a population size and payoff matrix involving
n-strategy interactions.
All equations and variable names correspond to the following paper:
https://arxiv.org/abs/1903.01373
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy.linalg as la
from open_spiel.python.egt import alpharank_visualizer
from open_spiel.python.egt import utils
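# --- Illustrative sketch (not part of the original module) --------------------
# The module docstring above mentions computing the stationary distribution of
# the Alpha-Rank Markov chain. A minimal, hedged sketch of that final step,
# assuming `c_matrix` is a row-stochastic transition matrix already assembled
# from the fixation probabilities computed below:
def _stationary_distribution_sketch(c_matrix):
  """Returns the left eigenvector of c_matrix for eigenvalue 1, normalized."""
  eigenvals, left_eigenvecs = la.eig(c_matrix, left=True, right=False)
  index = np.argmin(np.abs(eigenvals - 1.0))
  pi = np.abs(np.real(left_eigenvecs[:, index]))
  return pi / np.sum(pi)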
def _get_payoff(payoff_table_k, payoffs_are_hpt_format, strat_profile, k=None):
"""Gets the payoff of the k-th agent in a single or multi-population game.
Namely, accepts the payoff table of the k-th agent (which can be matrix or
HPT format), the index k of the agent of interest (so its payoff can be looked
up in case of an HPT format payoff table), and the pure strategy profile.
For multipopulation games, we currently only support games where the k-th
agent's payoff is a function of the HPT distribution (a vector
indicating the number of players playing each strategy), as opposed to the
strategy profile (a vector indicating the strategy of each player). This is
due to the nature of the PayoffTable class, which currently only tracks
distributions in the first k columns (rather than profiles).
Args:
payoff_table_k: The k-th agent's payoff table, in matrix or HPT format.
payoffs_are_hpt_format: Boolean indicating whether payoff_table_k is a
_PayoffTableInterface object (AKA Heuristic Payoff Table or HPT) or a
numpy array. True indicates HPT format, False indicates numpy array.
strat_profile: The pure strategy profile.
k: The index of the agent of interest. Only used for HPT case, and only >0
for a multi-population game.
Returns:
The k-th agent's payoff.
"""
if payoffs_are_hpt_format:
# All games are supported when using HPTs
assert k is not None
# Compute HPT distribution (vector of # of players per strategy)
distribution = payoff_table_k.get_distribution_from_profile(strat_profile)
# Lookup the payoff profile (HPT row) corresponding to the distribution
payoff_profile = payoff_table_k[tuple(distribution)]
# Return the payoff corresponding to the k-th agent's strategy
return payoff_profile[strat_profile[k]]
else:
# Only 2 player symmetric/asymmetric games supported using matrix payoffs
return payoff_table_k[tuple(strat_profile)]
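# --- Illustrative sketch (not part of the original module) --------------------
# A hedged example of the matrix (non-HPT) branch above: for a 2-player game
# with a payoff matrix for the row player, the pure strategy profile simply
# indexes the matrix. The payoff values here are made up for illustration.
def _payoff_lookup_example():
  payoff_table_row = np.array([[3., 0.],
                               [5., 1.]])
  # Row player plays strategy 0, column player plays strategy 1 -> payoff 0.0
  return _get_payoff(payoff_table_row, payoffs_are_hpt_format=False,
                     strat_profile=[0, 1])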
def _get_singlepop_2player_fitness(payoff_table, payoffs_are_hpt_format, m,
my_popsize, my_strat, opponent_strat,
use_local_selection_model):
"""Gets a target agent fitness given a finite population of competitors.
Note that this is only applicable to 2-player symmetric games.
Namely, gets fitness of an agent i playing my_strat in underlying population
of (my_popsize agents playing my_strat) and (m-my_popsize agents playing
opponent_strat).
Args:
payoff_table: A payoff table.
payoffs_are_hpt_format: Boolean indicating whether payoff_table is a
_PayoffTableInterface object (AKA Heuristic Payoff Table or HPT), or a
numpy array. True indicates HPT format, False indicates numpy array.
m: The total number of agents in the population.
my_popsize: The number of agents in the population playing my strategy.
my_strat: Index of my strategy.
opponent_strat: Index of the opposing strategy.
use_local_selection_model: Enable local evolutionary selection model, which
considers fitness against the current opponent only, rather than the
global population state.
Returns:
The fitness of agent i.
"""
if use_local_selection_model:
fitness = payoff_table[tuple([my_strat, opponent_strat])]
else:
fitness = ((my_popsize-1)/(m-1)*
_get_payoff(payoff_table, payoffs_are_hpt_format,
strat_profile=[my_strat, my_strat], k=0) +
(m-my_popsize)/(m-1)*
_get_payoff(payoff_table, payoffs_are_hpt_format,
strat_profile=[my_strat, opponent_strat], k=0))
return fitness
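# --- Illustrative sketch (not part of the original module) --------------------
# A hedged, worked example of the fitness expression above: with m agents in
# total and my_popsize of them playing my_strat, the fitness is the
# population-weighted average of the two relevant matrix entries. The numbers
# below are made up for illustration.
def _fitness_example():
  payoff_table_row = np.array([[3., 0.],
                               [5., 1.]])
  m, my_popsize = 10, 4
  fitness = _get_singlepop_2player_fitness(
      payoff_table_row, payoffs_are_hpt_format=False, m=m,
      my_popsize=my_popsize, my_strat=0, opponent_strat=1,
      use_local_selection_model=False)
  # Expected: (4-1)/(10-1) * 3.0 + (10-4)/(10-1) * 0.0 = 1.0
  assert np.isclose(fitness, 1.0)
  return fitness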
def _get_rho_sr(payoff_table,
payoffs_are_hpt_format,
m,
r,
s,
alpha,
game_is_constant_sum,
use_local_selection_model,
payoff_sum=None):
"""Gets fixation probability of rogue strategy r in population playing s.
Args:
payoff_table: A payoff table.
payoffs_are_hpt_format: Boolean indicating whether payoff_table is a
_PayoffTableInterface object (AKA Heuristic Payoff Table or HPT), or a
numpy array. True indicates HPT format, False indicates numpy array.
m: The total number of agents in the population.
r: Rogue strategy r.
s: Population strategy s.
alpha: Fermi distribution temperature parameter.
game_is_constant_sum: Boolean indicating if the game is constant sum.
use_local_selection_model: Enable local evolutionary selection model, which
considers fitness against the current opponent only, rather than the
global population state.
payoff_sum: The payoff sum if the game is constant sum, or None otherwise.
Returns:
The fixation probability.
"""
if use_local_selection_model or game_is_constant_sum:
payoff_rs = _get_payoff(
payoff_table, payoffs_are_hpt_format, strat_profile=[r, s], k=0)
if use_local_selection_model:
# Row plays s, column plays r
payoff_sr = _get_payoff(
payoff_table, payoffs_are_hpt_format, strat_profile=[s, r], k=0)
u = alpha * (payoff_rs - payoff_sr)
else:
assert payoff_sum is not None
u = alpha * m / (m - 1) * (payoff_rs - payoff_sum / 2)
if np.isclose(u, 0, atol=1e-14):
# To avoid divide by 0, use first-order approximation when u is near 0
result = 1 / m
else:
result = (1 - np.exp(-u)) / (1 - np.exp(-m * u))
else:
assert payoff_sum is None
summed = 0
for l in range(1, m):
t_mult = 1.
for p_r in range(1, l + 1):
# Number of agents in the current population playing strategy s
p_s = m - p_r
# Fitness of agent playing r against rest of current population
f_ri = _get_singlepop_2player_fitness(
payoff_table,
payoffs_are_hpt_format,
m,
my_popsize=p_r,
my_strat=r,
opponent_strat=s,
use_local_selection_model=use_local_selection_model)
# Fitness of agent playing s against rest of current population
f_sj = _get_singlepop_2player_fitness(
payoff_table,
payoffs_are_hpt_format,
m,
my_popsize=p_s,
my_strat=s,
opponent_strat=r,
use_local_selection_model=use_local_selection_model)
t_mult *= np.exp(-alpha * (f_ri - f_sj))
summed += t_mult
result = (1 + summed)**(-1)
return result
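# --- Illustrative sketch (not part of the original module) --------------------
# A hedged sanity check of the fixation probability above: under neutral drift
# (all payoffs equal, so mutant and resident fitnesses are identical), both the
# generic sum and the constant-sum closed form should reduce to rho = 1/m.
def _neutral_drift_check(m=10, alpha=1.0):
  neutral_payoffs = np.ones((2, 2))
  rho_generic = _get_rho_sr(neutral_payoffs, payoffs_are_hpt_format=False, m=m,
                            r=0, s=1, alpha=alpha, game_is_constant_sum=False,
                            use_local_selection_model=False)
  rho_closed_form = _get_rho_sr(neutral_payoffs, payoffs_are_hpt_format=False,
                                m=m, r=0, s=1, alpha=alpha,
                                game_is_constant_sum=True,
                                use_local_selection_model=False,
                                payoff_sum=2.0)
  assert np.isclose(rho_generic, 1. / m)
  assert np.isclose(rho_closed_form, 1. / m)
  return rho_generic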
def _get_rho_sr_multipop(payoff_table_k,
payoffs_are_hpt_format,
k,
m,
r,
s,
alpha,
use_fast_compute=True):
"""Gets fixation probability for multi-population games.
Specifically, considers the fitnesses of two strategy profiles r and s given
the payoff table of the k-th population. Profile s is the current profile and
r is a mutant profile. Profiles r and s are identical except for the k-th
element, which corresponds to the deviation of the k-th population's
monomorphic strategy from s[k] to r[k].
Args:
payoff_table_k: The k-th population's payoff table.
payoffs_are_hpt_format: Boolean indicating whether payoff_table_k is a
_PayoffTableInterface object (AKA Heuristic Payoff Table or HPT), or numpy
array. True indicates HPT format, False indicates numpy array.
k: Index of the k-th population.
m: Total number of agents in the k-th population.
r: Strategy profile containing mutant strategy r for population k.
s: Current strategy profile.
alpha: Fermi distribution temperature parameter.
use_fast_compute: Boolean indicating whether closed-form computation should
be used.
Returns:
Probability of strategy r fixating in population k.
"""
# Fitnesses are not dependent on population sizes for multipopulation case, so
# can be computed outside the loops
# Fitness of population k agent given strategy profile r
f_r = _get_payoff(payoff_table_k, payoffs_are_hpt_format, r, k)
# Fitness of population k agent given strategy profile s
f_s = _get_payoff(payoff_table_k, payoffs_are_hpt_format, s, k)
if use_fast_compute:
u = alpha * (f_r - f_s)
if np.isclose(u, 0, atol=1e-14):
__all__ = ['polygon_clip', 'polygon_area']
import numpy as np
def polygon_clip(rp, cp, r0, c0, r1, c1):
"""Clip a polygon to the given bounding box.
Parameters
----------
rp, cp : (N,) ndarray of double
Row and column coordinates of the polygon.
(r0, c0), (r1, c1) : double
Top-left and bottom-right coordinates of the bounding box.
Returns
-------
r_clipped, c_clipped : (M,) ndarray of double
Coordinates of clipped polygon.
Notes
-----
This makes use of Sutherland-Hodgman clipping as implemented in
AGG 2.4 and exposed in Matplotlib.
"""
from matplotlib import path, transforms
poly = path.Path(np.vstack((rp, cp)).T, closed=True)
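# --- Illustrative sketch (not part of the original function) ------------------
# The snippet above is truncated, so this is a hedged, self-contained sketch of
# the matplotlib-based Sutherland-Hodgman clipping the docstring describes:
# build a closed Path from the polygon, clip it to a Bbox, and read the clipped
# vertices back. The coordinates are made up for illustration.
def _bbox_clip_sketch():
    import numpy as np
    from matplotlib import path, transforms
    rp = np.array([0., 4., 4., 0.])
    cp = np.array([0., 0., 4., 4.])
    poly = path.Path(np.vstack((rp, cp)).T, closed=True)
    clip_box = transforms.Bbox([[1., 1.], [3., 3.]])
    clipped = poly.clip_to_bbox(clip_box).to_polygons()[0]
    return clipped[:, 0], clipped[:, 1]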
from scipy.stats import wasserstein_distance
import numpy as np
import imageio
from PIL import Image
import math
BEGIN = 0.25
END = 1 - BEGIN
WASSERT_DIST_TRASHHOLD = 130
COLOR_DIST_DIFF = 200
CHANNELS = 3
def rgb_wasserstein_distance(imga, imgb):
"""Compares two images using the Wasserstein distance
Args:
imga (numpy array): first image
imgb (numpy array): second image
Returns:
float: similarity
"""
acc=0.0
h1, h2 = get_histogram(imga), get_histogram(imgb)
for j in range(CHANNELS):
acc += wasserstein_distance(h1[j], h2[j]) ** 2
return 10000*math.sqrt(acc)
def get_histogram(img):
"""Calculates histogram from image
Args:
img (numpy array): image
Returns:
array: histogram
"""
h, w, _ = img.shape
hist = [[0.0] * 2**8 for _ in range(CHANNELS)]  # independent histogram per channel
for c in range(CHANNELS):
for i in range(h):
for j in range(w):
hist[c][img[i, j, c]] += 1
hist[c] = np.array(hist[c]) / (h * w)
return hist
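# --- Illustrative sketch (not part of the original module) --------------------
# A hedged usage example of the two helpers above on small random images; the
# image size and the comparison against WASSERT_DIST_TRASHHOLD only show the
# intended call pattern, not a validated tuning.
def _wasserstein_similarity_demo():
    rng = np.random.default_rng(seed=0)
    imga = rng.integers(0, 256, size=(8, 8, CHANNELS), dtype=np.uint8)
    imgb = rng.integers(0, 256, size=(8, 8, CHANNELS), dtype=np.uint8)
    distance = rgb_wasserstein_distance(imga, imgb)
    return {'distance': distance,
            'similar': distance < WASSERT_DIST_TRASHHOLD}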
from PIL import Image
import skimage.transform as st
def sim(a, b):
"""Decides if two images are similar
Args:
a (numpy array): first image
b (numpy array): second image
Returns:
dict: json-like dictionary 'similar': TRUE|FALSE
"""
SIZE=224
a, b = np.asarray(a), np.asarray(b)
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
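    # --- Illustrative sketch (not part of the original test suite) ---
    # Hedged mini-example of the behaviour the comment above relies on: when a
    # ready-made MultiIndex (or any array-like) is passed to set_index, no
    # column is dropped, which is why the expected frame is built with
    # drop=False.
    def _demo_multiindex_does_not_drop(self):
        df = DataFrame({"A": [1, 2], "B": ["x", "y"]})
        keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
        result = df.set_index(keys, drop=True)
        assert list(result.columns) == ["A", "B"]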
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
# coding=utf-8
from __future__ import (division, print_function, absolute_import,
unicode_literals)
'''
GCE OMEGA (One-zone Model for the Evolution of Galaxies) module
Functionality
=============
This tool allows one to simulate the chemical evolution of single-zone galaxies.
Having the star formation history as one of the input parameters, OMEGA can
target local galaxies by using observational data found in the literature.
Made by
=======
FEB2015: <NAME>, <NAME>
MAY2015: B. Cote
The code inherits the chem_evol class, which contains common functions shared by
SYGMA and OMEGA. The code in chem_evol has been developed by :
v0.1 NOV2013: <NAME>, <NAME>
v0.2 JAN2014: <NAME>
v0.3 APR2014: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME> &
the NuGrid collaboration
v0.4 FEB2015: <NAME>, B. Cote
v0.5 MAR2015: B. Cote
v0.6 OCT2016: B. Cote
Stopped keeping track of versions from now on.
MARCH2018: B. Cote
- Switched to Python 3
- Capability to include radioactive isotopes
FEB2019: A. Yagüe, B. Cote
- Optimized the code to run faster
Note
====
Please do not use "tabs" when introducing new lines of code.
Usage
=====
Import the module:
>>> import omega as o
Get help:
>>> help(o)
Get more information:
>>> o.omega?
Create a custom galaxy (closed box):
>>> o1 = o.omega(cte_sfr=1.0, mgal=1.5e10)
Simulate a known galaxy (open box):
>>> o2 = o.omega(galaxy='sculptor', in_out_control=True, mgal=1e6, mass_loading=8, in_out_ratio=1.5)
Analysis functions: See the Sphinx documentation
'''
# Standard packages
import copy
import math
import random
import os
# Define where is the working directory
# This is where the NuPyCEE code will be extracted
nupy_path = os.path.dirname(os.path.realpath(__file__))
# Import NuPyCEE codes
import NuPyCEE.sygma as sygma
from NuPyCEE.chem_evol import *
class omega( chem_evol ):
'''
Input parameters (OMEGA)
================
Important : By default, a closed box model is always assumed.
galaxy : string
Name of the target galaxy. By using a known galaxy, the code
automatically selects the corresponding star formation history, stellar
mass, and total mass (when available). By using 'none', the user has
perfect control of these three last parameters.
Choices : 'milky_way', 'milky_way_cte', 'sculptor', 'carina', 'fornax',
'none'
Default value : 'none'
Special note : The 'milky_way_cte' option uses the Milky Way's
characteristics, but with a constant star formation history.
cte_sfr : float
Constant star formation history in [Mo/yr].
Default value : 1.0
rand_sfh : float
Maximum possible ratio between the maximum and the minimum values of a star
formation history that is randomly generated.
Default value : 0.0 (deactivated)
Special note : A value greater than zero automatically generates a random
star formation history, which bypasses the use of the cte_sfr parameter.
sfh_file : string
Path to a file containing an input star formation history. The first and
second columns must be the age of the galaxy in [yr] and the star
formation rate in [Mo/yr].
Default value : 'none' (deactivated)
Special note : When a path is specified, it bypasses the cte_sfr and
rand_sfh parameters.
stellar_mass_0 : float
Current stellar mass of the galaxy, in [Mo], at the end of the simulation.
Default value : -1.0 (you need to specify a value with unknown galaxies)
in_out_control : boolean
The in_out_control implementation enables control of the outflow and
the inflow rates independently by using constant values (see outflow_rate
and inflow_rate) or by using a mass-loading factor that connects the
rates to the star formation history (see mass_loading and in_out_ratio).
Default value : False (deactivated)
mass_loading : float
Ratio between the outflow rate and the star formation rate.
Default value : 1.0
outflow_rate : float
Constant outflow rate in [Mo/yr].
Default value : -1.0 (deactivated)
Special note : A value greater than or equal to zero activates the constant
outflow mode, which bypasses the use of the mass_loading parameter.
in_out_ratio : float
Used in : in_out_control mode
Ratio between the inflow rate and the outflow rate. This parameter is
used to calculate the inflow rate, not the outflow rate.
Default value : 1.0
inflow_rate : float
Used in : in_out_control mode
Constant inflow rate in [Mo/yr].
Default value : -1.0 (deactivated)
Special note : A value greater than or equal to zero activates the constant
inflow mode, which bypasses the use of the in_out_ratio parameter.
SF_law : boolean
The SF_law implementation assumes a Kennicutt-Schmidt star formation law
and combines it with the known input star formation history in order to
derive the mass of the gas reservoir at every timestep (a short
illustrative sketch of this relation is given right after this docstring).
Default value : False (deactivated)
sfe : float
Used in : SF_law and DM_evolution modes
Star formation efficiency present in the Kennicutt-Schmidt law.
Default value : 0.1
f_dyn : float
Used in : SF_law and DM_evolution modes
Scaling factor used to calculate the star formation timescale present in
the Kennicutt-Schmidt law. We assume that this timescale is equal to a
fraction of the dynamical timescale of the virialized system (dark and
baryonic matter), t_star = f_dyn * t_dyn.
Default value : 0.1
m_DM_0 : float
Used in : SF_law and DM_evolution modes
Current dark matter halo mass of the galaxy, in [Mo], at the end of the
simulations.
Default value : 1.0e+11
t_star : float
Used in : SF_law and DM_evolution modes
Star formation timescale, in [yr], used in the Kennicutt-Schmidt law.
Default value = -1.0 (deactivated)
Special note : A positive value activates the use of this parameter,
which bypasses the f_dyn parameter.
DM_evolution : boolean
The DM_evolution implementation is an extension of the SF_law option.
In addition to using a Kennicutt-Schmidt star formation law, it assumes
an evolution in the total mass of the galaxy as a function of time. With
this prescription, the mass-loading factor has a mass dependency. The
mass_loading parameter then only represents the final value at the end
of the simulation.
Default value : False (deactivated)
exp_ml : float
Used in : DM_evolution mode
Exponent of the mass dependency of the mass-loading factor. This last
factor is proportional to M_vir**(-exp_ml/3), where M_vir is the sum of
dark and baryonic matter.
Default value : 2.0
================
'''
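    # --- Illustrative sketch referenced in the SF_law description above ---
    # (Not part of the original class; a standalone helper, not meant to be
    # called as a method.) When SF_law is True, the star formation law reads
    # SFR = sfe * M_gas / t_star with t_star = f_dyn * t_dyn, so the gas
    # reservoir implied by a known SFR is M_gas = SFR * t_star / sfe. The
    # default values below simply mirror the docstring, not OMEGA internals.
    def _ks_gas_mass_sketch(sfr, t_dyn, sfe=0.1, f_dyn=0.1):
        t_star = f_dyn * t_dyn
        return sfr * t_star / sfe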
#Combine docstrings from chem_evol with sygma docstring
__doc__ = __doc__+chem_evol.__doc__
##############################################
## Constructor ##
##############################################
def __init__(self, galaxy='none', in_out_control=False, SF_law=False, \
DM_evolution=False, Z_trans=1e-20, f_dyn=0.1, sfe=0.1, \
outflow_rate=-1.0, inflow_rate=-1.0, rand_sfh=0.0, cte_sfr=1.0, \
m_DM_0=1.0e11, mass_loading=1.0, t_star=-1.0, sfh_file='none', \
in_out_ratio=1.0, stellar_mass_0=-1.0, \
z_dependent=True, exp_ml=2.0,nsmerger_bdys=[8, 100], \
imf_type='kroupa', alphaimf=2.35, imf_bdys=[0.1,100], \
sn1a_rate='power_law', iniZ=0.0, dt=1e6, special_timesteps=30, \
tend=13e9, mgal=1.0e10, transitionmass=8.0, iolevel=0, \
ini_alpha=True, nb_nsm_per_m=-1.0, t_nsm_coal=30.0e6,\
high_mass_extrapolation='copy',\
table='yield_tables/agb_and_massive_stars_nugrid_MESAonly_fryer12delay.txt', \
use_decay_module=False, yield_tables_dir='',\
f_network='isotopes_modified.prn', f_format=1,\
table_radio='', decay_file='', sn1a_table_radio='',\
bhnsmerger_table_radio='', nsmerger_table_radio='',\
hardsetZ=-1, sn1a_on=True, nsm_dtd_power=[],\
sn1a_table='yield_tables/sn1a_i99_W7.txt',\
ns_merger_on=False, f_binary=1.0, f_merger=0.0008,\
t_merger_max=1.3e10, m_ej_nsm = 2.5e-02, \
nsmerger_table = 'yield_tables/r_process_arnould_2007.txt', \
bhns_merger_on=False, m_ej_bhnsm=2.5e-02, \
bhnsmerger_table = 'yield_tables/r_process_arnould_2007.txt', \
iniabu_table='', extra_source_on=False, \
extra_source_table=['yield_tables/extra_source.txt'], \
f_extra_source=[1.0], pre_calculate_SSPs=False, \
extra_source_mass_range=[[8,30]], \
total_ejecta_interp=True, radio_refinement=100, \
extra_source_exclude_Z=[[]], beta_crit=1.0, \
pop3_table='yield_tables/popIII_heger10.txt', \
imf_bdys_pop3=[0.1,100], imf_yields_range_pop3=[10,30], \
imf_pop3_char_mass=40.0, \
starbursts=[], beta_pow=-1.0, gauss_dtd=[1e9,6.6e8],exp_dtd=2e9,\
nb_1a_per_m=1.0e-3, f_arfo=1, t_merge=-1.0,\
imf_yields_range=[1,30],exclude_masses=[], \
netyields_on=False,wiersmamod=False,skip_zero=False,\
redshift_f=0.0,print_off=False,long_range_ref=False,\
f_s_enhance=1.0,m_gas_f=-1.0, cl_SF_law=False,\
external_control=False, use_external_integration=False,\
calc_SSP_ej=False, tau_ferrini=False,\
input_yields=False, popIII_info_fast=True, t_sf_z_dep = 1.0,\
m_crit_on=False, norm_crit_m=8.0e+09, mass_frac_SSP=0.5,\
sfh_array_norm=-1.0, imf_rnd_sampling=False,\
out_follows_E_rate=False, yield_interp='lin',\
r_gas_star=-1.0, cte_m_gas = -1.0, t_dtd_poly_split=-1.0,\
stellar_param_on=False, delayed_extra_log=False,\
delayed_extra_yields_log_int=False,\
bhnsmerger_dtd_array=np.array([]), dt_in_SSPs=np.array([]), \
DM_array=np.array([]), nsmerger_dtd_array=np.array([]),\
sfh_array=np.array([]),ism_ini=np.array([]),\
ism_ini_radio=np.array([]),\
m_inflow_array=np.array([]), m_gas_array=np.array([]),\
mdot_ini=np.array([]), mdot_ini_t=np.array([]),\
ytables_in=np.array([]), zm_lifetime_grid_nugrid_in=np.array([]),\
isotopes_in=np.array([]), ytables_pop3_in=np.array([]),\
zm_lifetime_grid_pop3_in=np.array([]), ytables_1a_in=np.array([]),\
ytables_nsmerger_in=np.array([]), SSPs_in=np.array([]),\
dt_in=np.array([]), dt_split_info=np.array([]),\
ej_massive=np.array([]), ej_agb=np.array([]),\
ej_sn1a=np.array([]), ej_massive_coef=np.array([]),\
ej_agb_coef=np.array([]), ej_sn1a_coef=np.array([]),\
dt_ssp=np.array([]), r_vir_array=np.array([]),\
mass_sampled=np.array([]), scale_cor=np.array([]),\
mass_sampled_ssp=np.array([]), scale_cor_ssp=np.array([]),\
poly_fit_dtd_5th=np.array([]), poly_fit_range=np.array([]),\
m_tot_ISM_t_in=np.array([]), m_inflow_X_array=np.array([]),\
delayed_extra_dtd=np.array([]), delayed_extra_dtd_norm=np.array([]), \
delayed_extra_yields=np.array([]), delayed_extra_yields_norm=np.array([]),\
delayed_extra_yields_radio=np.array([]), \
delayed_extra_yields_norm_radio=np.array([]), \
ytables_radio_in=np.array([]), radio_iso_in=np.array([]), \
ytables_1a_radio_in=np.array([]), ytables_nsmerger_radio_in=np.array([]),\
omega_0=0.32, omega_b_0=0.05, lambda_0=0.68, H_0=67.11,\
test_clayton=np.array([]), inter_Z_points=np.array([]),\
nb_inter_Z_points=np.array([]), y_coef_M=np.array([]),\
y_coef_M_ej=np.array([]), y_coef_Z_aM=np.array([]),\
y_coef_Z_bM=np.array([]), y_coef_Z_bM_ej=np.array([]),\
tau_coef_M=np.array([]), tau_coef_M_inv=np.array([]),\
tau_coef_Z_aM=np.array([]), tau_coef_Z_bM=np.array([]),\
tau_coef_Z_aM_inv=np.array([]), tau_coef_Z_bM_inv=np.array([]),\
y_coef_M_pop3=np.array([]), y_coef_M_ej_pop3=np.array([]),\
tau_coef_M_pop3=np.array([]), tau_coef_M_pop3_inv=np.array([]),\
inter_lifetime_points_pop3=np.array([]),\
inter_lifetime_points_pop3_tree=np.array([]),\
nb_inter_lifetime_points_pop3=np.array([]),\
inter_lifetime_points=np.array([]), inter_lifetime_points_tree=np.array([]),\
nb_inter_lifetime_points=np.array([]), nb_inter_M_points_pop3=np.array([]),\
inter_M_points_pop3_tree=np.array([]), nb_inter_M_points=np.array([]),\
inter_M_points=np.array([]), y_coef_Z_aM_ej=np.array([])):
# Get the name of the instance
import traceback
(filename,line_number,function_name,text)=traceback.extract_stack()[-2]
self.inst_name = text[:text.find('=')].strip()
# Announce the beginning of the simulation
if not print_off:
print ('OMEGA run in progress..')
start_time = t_module.time()
self.start_time = start_time
# Call the init function of the class inherited by SYGMA
chem_evol.__init__(self, imf_type=imf_type, alphaimf=alphaimf, \
imf_bdys=imf_bdys, sn1a_rate=sn1a_rate, iniZ=iniZ, dt=dt, \
special_timesteps=special_timesteps, tend=tend, mgal=mgal, \
transitionmass=transitionmass, iolevel=iolevel, \
ini_alpha=ini_alpha, table=table, hardsetZ=hardsetZ, \
sn1a_on=sn1a_on, sn1a_table=sn1a_table, nsm_dtd_power=nsm_dtd_power,\
ns_merger_on=ns_merger_on, f_binary=f_binary, f_merger=f_merger,\
nsmerger_table=nsmerger_table, t_merger_max=t_merger_max,\
m_ej_nsm = m_ej_nsm, nb_nsm_per_m=nb_nsm_per_m, t_nsm_coal=t_nsm_coal, \
bhns_merger_on=bhns_merger_on, m_ej_bhnsm=m_ej_bhnsm, \
bhnsmerger_table=bhnsmerger_table, \
table_radio=table_radio, decay_file=decay_file,\
sn1a_table_radio=sn1a_table_radio, \
bhnsmerger_table_radio=bhnsmerger_table_radio,\
nsmerger_table_radio=nsmerger_table_radio,\
iniabu_table=iniabu_table, extra_source_on=extra_source_on, \
extra_source_table=extra_source_table,f_extra_source=f_extra_source, \
extra_source_mass_range=extra_source_mass_range, \
extra_source_exclude_Z=extra_source_exclude_Z,\
pop3_table=pop3_table, \
imf_bdys_pop3=imf_bdys_pop3, \
imf_pop3_char_mass=imf_pop3_char_mass, \
total_ejecta_interp=total_ejecta_interp, \
imf_yields_range_pop3=imf_yields_range_pop3, \
starbursts=starbursts, beta_pow=beta_pow, \
gauss_dtd = gauss_dtd, exp_dtd = exp_dtd, \
nb_1a_per_m=nb_1a_per_m, Z_trans=Z_trans, f_arfo=f_arfo, \
imf_yields_range=imf_yields_range,exclude_masses=exclude_masses, \
netyields_on=netyields_on,wiersmamod=wiersmamod, \
input_yields=input_yields, ism_ini_radio=ism_ini_radio,\
tau_ferrini=tau_ferrini, t_dtd_poly_split=t_dtd_poly_split, \
t_merge=t_merge,popIII_info_fast=popIII_info_fast,\
out_follows_E_rate=out_follows_E_rate,\
use_external_integration=use_external_integration,\
stellar_param_on=stellar_param_on, pre_calculate_SSPs=pre_calculate_SSPs,\
print_off=print_off, yield_tables_dir=yield_tables_dir, \
ism_ini=ism_ini,ytables_in=ytables_in,\
delayed_extra_yields_log_int=delayed_extra_yields_log_int,\
zm_lifetime_grid_nugrid_in=zm_lifetime_grid_nugrid_in,\
isotopes_in=isotopes_in,ytables_pop3_in=ytables_pop3_in,\
zm_lifetime_grid_pop3_in=zm_lifetime_grid_pop3_in,\
ytables_1a_in=ytables_1a_in, dt_in_SSPs=dt_in_SSPs, \
delayed_extra_log=delayed_extra_log, \
nsmerger_dtd_array=nsmerger_dtd_array,\
bhnsmerger_dtd_array=bhnsmerger_dtd_array, \
ytables_nsmerger_in=ytables_nsmerger_in, dt_in=dt_in,\
dt_split_info=dt_split_info,ej_massive=ej_massive,\
ej_agb=ej_agb,ej_sn1a=ej_sn1a,\
ej_massive_coef=ej_massive_coef,ej_agb_coef=ej_agb_coef,\
ej_sn1a_coef=ej_sn1a_coef,dt_ssp=dt_ssp,\
yield_interp=yield_interp, SSPs_in=SSPs_in,\
poly_fit_dtd_5th=poly_fit_dtd_5th,poly_fit_range=poly_fit_range,\
delayed_extra_dtd=delayed_extra_dtd,\
delayed_extra_dtd_norm=delayed_extra_dtd_norm,\
delayed_extra_yields=delayed_extra_yields,\
delayed_extra_yields_norm=delayed_extra_yields_norm,\
delayed_extra_yields_radio=delayed_extra_yields_radio,\
delayed_extra_yields_norm_radio=delayed_extra_yields_norm_radio,\
ytables_radio_in=ytables_radio_in, radio_iso_in=radio_iso_in,\
ytables_1a_radio_in=ytables_1a_radio_in,\
ytables_nsmerger_radio_in=ytables_nsmerger_radio_in,\
test_clayton=test_clayton, radio_refinement=radio_refinement,\
use_decay_module=use_decay_module,\
f_network=f_network, f_format=f_format,\
high_mass_extrapolation=high_mass_extrapolation,\
inter_Z_points=inter_Z_points,\
nb_inter_Z_points=nb_inter_Z_points, y_coef_M=y_coef_M,\
y_coef_M_ej=y_coef_M_ej, y_coef_Z_aM=y_coef_Z_aM,\
y_coef_Z_bM=y_coef_Z_bM, y_coef_Z_bM_ej=y_coef_Z_bM_ej,\
tau_coef_M=tau_coef_M, tau_coef_M_inv=tau_coef_M_inv,\
tau_coef_Z_aM=tau_coef_Z_aM, tau_coef_Z_bM=tau_coef_Z_bM,\
tau_coef_Z_aM_inv=tau_coef_Z_aM_inv, tau_coef_Z_bM_inv=tau_coef_Z_bM_inv,\
y_coef_M_pop3=y_coef_M_pop3, y_coef_M_ej_pop3=y_coef_M_ej_pop3,\
tau_coef_M_pop3=tau_coef_M_pop3, tau_coef_M_pop3_inv=tau_coef_M_pop3_inv,\
inter_lifetime_points_pop3=inter_lifetime_points_pop3,\
inter_lifetime_points_pop3_tree=inter_lifetime_points_pop3_tree,\
nb_inter_lifetime_points_pop3=nb_inter_lifetime_points_pop3,\
inter_lifetime_points=inter_lifetime_points,\
inter_lifetime_points_tree=inter_lifetime_points_tree,\
nb_inter_lifetime_points=nb_inter_lifetime_points,\
nb_inter_M_points_pop3=nb_inter_M_points_pop3,\
inter_M_points_pop3_tree=inter_M_points_pop3_tree,\
nb_inter_M_points=nb_inter_M_points, inter_M_points=inter_M_points,\
y_coef_Z_aM_ej=y_coef_Z_aM_ej)
# Quit if something bad happened in chem_evol ..
if self.need_to_quit:
return
# Calculate the number of CC SNe per Msun formed
if out_follows_E_rate:
A_pop3 = 1.0 / self._imf(imf_bdys_pop3[0],imf_bdys_pop3[1],2)
self.nb_ccsne_per_m_pop3 = \
A_pop3 * self._imf(imf_yields_range_pop3[0], \
imf_yields_range_pop3[1],1)
A = 1.0 / self._imf(imf_bdys[0],imf_bdys[1],2)
self.nb_ccsne_per_m = \
A * self._imf(transitionmass,imf_yields_range[1],1)
# Attribute the input parameters to the current OMEGA object
self.galaxy = galaxy
self.in_out_control = in_out_control
self.SF_law = SF_law
self.DM_evolution = DM_evolution
self.f_dyn = f_dyn
self.sfe = sfe
self.outflow_rate = outflow_rate
self.inflow_rate = inflow_rate
self.rand_sfh = rand_sfh
self.cte_sfr = cte_sfr
self.m_DM_0 = m_DM_0
self.mass_loading = mass_loading
self.t_star = t_star
self.sfh_file = sfh_file
self.in_out_ratio = in_out_ratio
self.stellar_mass_0 = stellar_mass_0
self.z_dependent = z_dependent
self.exp_ml = exp_ml
self.DM_too_low = False
self.skip_zero = skip_zero
self.redshift_f = redshift_f
self.print_off = print_off
self.long_range_ref = long_range_ref
self.m_crit_on = m_crit_on
self.norm_crit_m = norm_crit_m
self.sfh_array_norm = sfh_array_norm
self.DM_array = DM_array
self.sfh_array = sfh_array
self.mdot_ini = mdot_ini
self.mdot_ini_t = mdot_ini_t
self.r_gas_star = r_gas_star
self.m_gas_f = m_gas_f
self.cl_SF_law = cl_SF_law
self.external_control = external_control
self.mass_sampled = mass_sampled
self.scale_cor = scale_cor
self.imf_rnd_sampling = imf_rnd_sampling
self.cte_m_gas = cte_m_gas
self.t_sf_z_dep = t_sf_z_dep
self.out_follows_E_rate = out_follows_E_rate
self.m_tot_ISM_t_in = m_tot_ISM_t_in
self.m_inflow_array = m_inflow_array
self.len_m_inflow_array = len(m_inflow_array)
self.m_inflow_X_array = m_inflow_X_array
self.len_m_inflow_X_array = len(m_inflow_X_array)
self.m_gas_array = m_gas_array
self.len_m_gas_array = len(m_gas_array)
self.beta_crit = beta_crit
self.r_vir_array = r_vir_array
self.pre_calculate_SSPs = pre_calculate_SSPs
# If SSPs needs to be pre-calculated ..
if self.pre_calculate_SSPs:
# Calculate all SSPs
self.__run_all_ssps()
# Create the arrays that will contain the interpolated isotopes
self.ej_SSP_int = np.zeros((self.nb_steps_table,self.nb_isotopes))
if self.len_decay_file > 0:
self.ej_SSP_int_radio = np.zeros((self.nb_steps_table,self.nb_radio_iso))
# If the IMF will randomly be sampled ...
if self.imf_rnd_sampling:
# Print info about the IMF sampling
self.m_pop_max = 1.0e4
print ('IMF random sampling for SSP with M < ',self.m_pop_max)
# Calculate the stellar mass associated with the
# highest IMF value (needed for Monte Carlo)
# ONLY SAMPLING MASSIVE STARS
self.A_rdm = 1.0 / self.transitionmass**(-2.3)
self.m_frac_massive_rdm = self.A_rdm * \
self._imf(self.transitionmass, self.imf_bdys[1], 2)
# Calculate the stellar mass associated with the
# highest IMF value (needed for Monte Carlo)
# SAMPLING ALL STARS (warning! --> Need to modify the code for this)
#self.imf_norm_sampled = 10.0
#self.imfnorm = self.imf_norm_sampled
#imf_temp = []
#m_temp = self.imf_bdys[0]
#dm = 0.02
#while m_temp <= (self.imf_bdys[1]):
# imf_temp.append(self._imf(1.0,2.0,0,mass=m_temp))
# m_temp += dm
#self.imf_max = max(imf_temp)
# Set cosmological parameters - Dunkley et al. (2009)
#self.omega_0 = 0.257 # Current mass density parameter
#self.omega_b_0 = 0.044 # Current baryonic mass density parameter
#self.lambda_0 = 0.742 # Current dark energy density parameter
#self.H_0 = 71.9 # Hubble constant [km s^-1 Mpc^-1]
# Set cosmological parameters - as in Wise et al. 2012
#self.omega_0 = 0.266 # Current mass density parameter
#self.omega_b_0 = 0.0449 # Current baryonic mass density parameter
#self.lambda_0 = 0.734 # Current dark energy density parameter
#self.H_0 = 71.0 # Hubble constant [km s^-1 Mpc^-1]
# Set cosmological parameters - default is Planck 2013 (used in Caterpillar)
self.omega_0 = omega_0 # Current mass density parameter
self.omega_b_0 = omega_b_0 # Current baryonic mass density parameter
self.lambda_0 = lambda_0 # Current dark energy density parameter
self.H_0 = H_0 # Hubble constant [km s^-1 Mpc^-1]
# Look for errors in the input parameters
self.__check_inputs_omega()
# Define whether the open box scenario is used or not
if self.in_out_control or self.SF_law or self.DM_evolution:
self.open_box = True
else:
self.open_box = False
# Check if the timesteps need to be refined
if self.SF_law or self.DM_evolution:
self.t_SF_t = []
self.redshift_t = []
for k in range(self.nb_timesteps):
self.t_SF_t.append(0.0)
self.redshift_t.append(0.0)
self.t_SF_t.append(0.0)
self.redshift_t.append(0.0)
self.calculate_redshift_t()
self.__calculate_t_SF_t()
need_t_raf = False
for i_raf in range(self.nb_timesteps):
if self.history.timesteps[i_raf] > self.t_SF_t[i_raf] / self.sfe:
need_t_raf = True
break
if need_t_raf:
if self.long_range_ref:
self.__rafine_steps_lr()
else:
self.__rafine_steps()
# Re-Create entries for the mass-loss rate of massive stars
self.massive_ej_rate = []
self.sn1a_ej_rate = []
for k in range(self.nb_timesteps + 1):
self.massive_ej_rate.append(0.0)
self.sn1a_ej_rate.append(0.0)
# Declare arrays used to follow the evolution of the galaxy
self.__declare_evol_arrays()
# If the mass fraction ejected by SSPs needs to be calculated ...
# Need to be before self.__initialize_gal_prop()!!
self.mass_frac_SSP = -1.0
if calc_SSP_ej:
# Run SYGMA with five different metallicities
Z = [0.02, 0.01, 0.006, 0.001, 0.0001]
s_inst = []
self.mass_frac_SSP = 0.0
for i_Z_SSP in range(0,len(Z)):
s_inst = sygma.sygma(imf_type=imf_type, alphaimf=alphaimf,\
imf_bdys=imf_bdys, sn1a_rate=sn1a_rate, iniZ=Z[i_Z_SSP], dt=dt, \
special_timesteps=special_timesteps, tend=tend, mgal=1.0, \
transitionmass=transitionmass, iolevel=iolevel, \
ini_alpha=ini_alpha, table=table, hardsetZ=hardsetZ, \
sn1a_on=sn1a_on, sn1a_table=sn1a_table, \
iniabu_table=iniabu_table, extra_source_on=extra_source_on, \
extra_source_table=extra_source_table, pop3_table=pop3_table, \
imf_bdys_pop3=imf_bdys_pop3, \
imf_yields_range_pop3=imf_yields_range_pop3, \
starbursts=starbursts, beta_pow=beta_pow, \
gauss_dtd = gauss_dtd, exp_dtd = exp_dtd, \
nb_1a_per_m=nb_1a_per_m, Z_trans=Z_trans, f_arfo=f_arfo, \
imf_yields_range=imf_yields_range,exclude_masses=exclude_masses,\
netyields_on=netyields_on,wiersmamod=wiersmamod)
self.mass_frac_SSP += np.sum(s_inst.ymgal[-1])
# Calculate the average mass fraction returned
self.mass_frac_SSP = self.mass_frac_SSP / len(Z)
print ('Average SSP mass fraction returned = ',self.mass_frac_SSP)
else:
self.mass_frac_SSP = mass_frac_SSP
# Set the general properties of the selected galaxy
self.__initialize_gal_prop()
# Fill arrays used to follow the evolution
self.__fill_evol_arrays()
# Read the primordial composition of the inflow gas
if self.in_out_control or self.SF_law or self.DM_evolution:
prim_comp_table = os.path.join('yield_tables', 'iniabu',\
'iniab_bb_walker91.txt')
self.prim_comp = ry.read_yield_sn1a_tables(os.path.join(nupy_path,\
prim_comp_table), self.history.isotopes)
# In construction .. need to avoid altering default setups ..
# Assume the baryonic ratio for the initial gas reservoir, if needed
# if len(self.ism_ini) == 0 and not self.SF_law and not self.DM_evolution:
# if self.bar_ratio and not self.cl_SF_law:
# scale_m_tot = self.m_DM_0 * self.omega_b_0 / \
# (self.omega_0*np.sum(self.ymgal[0]))
# for k_cm in range(len(self.ymgal[0])):
# self.ymgal[0][k_cm] = self.ymgal[0][k_cm] * scale_m_tot
# Add the stellar ejecta coming from external galaxies that just merged
if len(self.mdot_ini) > 0:
self.__add_ext_mdot()
# Initialisation of the composition of the gas reservoir
if len(self.ism_ini) > 0:
for i_ini in range(0,self.len_ymgal):
self.ymgal[0][i_ini] = self.ism_ini[i_ini]
# Copy the outflow-vs-SFR array and re-initialize for delayed outflow
if out_follows_E_rate:
self.outflow_test = np.sum(self.m_outflow_t)
self.m_outflow_t_vs_SFR = copy.copy(self.m_outflow_t)
for i_ofer in range(0,self.nb_timesteps):
self.m_outflow_t[i_ofer] = 0.0
# If the timestep are not control by an external program ...
if not self.external_control:
# Run the simulation
self.__run_simulation(mass_sampled, scale_cor)
##############################################
# Check Inputs OMEGA #
##############################################
def __check_inputs_omega(self):
'''
This function checks for incompatible input entries, and stops
the simulation if needed.
'''
# Input galaxy
if not self.galaxy in ['none', 'milky_way', 'milky_way_cte', \
'sculptor', 'fornax', 'carina']:
print ('Error - Selected galaxy not available.')
return
# Random SFH
if self.rand_sfh > 0.0 and self.stellar_mass_0 < 0.0:
print ('Error - You need to choose a current stellar mass.')
return
# Inflow control when non-available
if self.in_out_control and (self.SF_law or self.DM_evolution):
print ('Error - Cannot control inflows and outflows when SF_law or '\
'DM_evolution is equal to True.')
return
# Defined initial dark matter halo mass when non-available
#if self.m_DM_ini > 0.0 and not self.DM_evolution:
# print ('Warning - Can\'t control m_DM_ini when the mass of', \
# 'the dark matter halo is not evolving.')
# Inflow and outflow control when the dark matter mass if evolving
if (self.outflow_rate >= 0.0 or self.inflow_rate >= 0.0) and \
self.DM_evolution:
print ('Error - Cannot fix inflow and outflow rates when the mass '\
'of the dark matter halo is evolving.')
return
# Inflow array when input
if self.len_m_inflow_array > 0:
if not self.len_m_inflow_array == self.nb_timesteps:
print ('Error - len(m_inflow_array) needs to equal nb_timesteps.')
return
# Inflow X array when input
if self.len_m_inflow_X_array > 0:
if not self.len_m_inflow_X_array == self.nb_timesteps:
print ('Error - len(m_inflow_X_array) needs to equal nb_timesteps.')
return
if not len(self.m_inflow_X_array[0]) == self.nb_isotopes:
print ('Error - len(m_inflow_X_array[i]) needs to equal nb_isotopes.')
return
# Mgas array when input
if self.len_m_gas_array > 0:
if not self.len_m_gas_array == (self.nb_timesteps+1):
print ('Error - len(m_gas_array) needs to equal nb_timesteps+1.')
return
##############################################
# Refine Steps #
##############################################
def __rafine_steps(self):
'''
This function increases the number of timesteps if the star formation
will eventually consume all the gas, which occurs when dt > (t_star/sfe).
'''
# Declaration of the new timestep array
if not self.print_off:
print ('..Time refinement..')
new_dt = []
# For every timestep ...
for i_rs in range(0,len(self.history.timesteps)):
# Calculate the critical time delay
t_raf = self.t_SF_t[i_rs] / self.sfe
# If the step needs to be refined ...
if self.history.timesteps[i_rs] > t_raf:
# Calculate the split factor
nb_split = int(self.history.timesteps[i_rs] / t_raf) + 1
# Split the step
for i_sp_st in range(0,nb_split):
new_dt.append(self.history.timesteps[i_rs]/nb_split)
# If ok, don't change anything
else:
new_dt.append(self.history.timesteps[i_rs])
# Update the timestep information
self.nb_timesteps = len(new_dt)
self.history.timesteps = new_dt
# Update self.history.age
self.history.age = [0]
for ii in range(self.nb_timesteps):
self.history.age.append(self.history.age[-1] + new_dt[ii])
self.history.age = np.array(self.history.age)
# If a timestep needs to be added to be synchronized with
# the external program managing merger trees ...
if self.t_merge > 0.0:
# Find the interval where the step needs to be added
i_temp = 0
t_temp = new_dt[0]
while t_temp / self.t_merge < 0.9999999:
i_temp += 1
t_temp += new_dt[i_temp]
# Keep the t_merger index in memory
self.i_t_merger = i_temp
# Update/redeclare all the arrays (stable isotopes)
ymgal = self._get_iniabu()
self.len_ymgal = len(ymgal)
self.mdot, self.ymgal, self.ymgal_massive, self.ymgal_agb, \
self.ymgal_1a, self.ymgal_nsm, self.ymgal_bhnsm, \
self.ymgal_delayed_extra, self.mdot_massive, \
self.mdot_agb, self.mdot_1a, self.mdot_nsm, self.mdot_bhnsm, \
self.mdot_delayed_extra, \
self.sn1a_numbers, self.sn2_numbers, self.nsm_numbers, self.bhnsm_numbers,\
self.delayed_extra_numbers, self.imf_mass_ranges, \
self.imf_mass_ranges_contribution, self.imf_mass_ranges_mtot = \
self._get_storing_arrays(ymgal, len(self.history.isotopes))
# Update/redeclare all the arrays (unstable isotopes)
if self.len_decay_file > 0:
ymgal_radio = np.zeros(self.nb_radio_iso)
# Initialisation of the storing arrays for radioactive isotopes
self.mdot_radio, self.ymgal_radio, self.ymgal_massive_radio, \
self.ymgal_agb_radio, self.ymgal_1a_radio, self.ymgal_nsm_radio, \
self.ymgal_bhnsm_radio, self.ymgal_delayed_extra_radio, \
self.mdot_massive_radio, self.mdot_agb_radio, self.mdot_1a_radio, \
self.mdot_nsm_radio, self.mdot_bhnsm_radio,\
self.mdot_delayed_extra_radio, dummy, dummy, dummy, dummy, dummy, \
dummy, dummy, dummy = \
self._get_storing_arrays(ymgal_radio, self.nb_radio_iso)
# Recalculate the simulation time (used in chem_evol)
self.t_ce = []
self.t_ce.append(self.history.timesteps[0])
for i_init in range(1,self.nb_timesteps):
self.t_ce.append(self.t_ce[i_init-1] + self.history.timesteps[i_init])
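    # --- Illustrative sketch (not part of the original class; standalone, not
    # meant to be called as a method). Hedged rewrite of the splitting rule used
    # in __rafine_steps above: a timestep dt longer than t_raf = t_SF / sfe is
    # divided into int(dt / t_raf) + 1 equal sub-steps, so no single step can
    # consume more gas than the star formation timescale allows.
    def _split_timestep_sketch(dt, t_SF, sfe):
        t_raf = t_SF / sfe
        if dt <= t_raf:
            return [dt]
        nb_split = int(dt / t_raf) + 1
        return [dt / nb_split] * nb_split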
##############################################
# Refine Steps LR #
##############################################
def __rafine_steps_lr(self):
'''
This function increases the number of timesteps if the star formation
will eventually consume all the gas, which occurs when dt > (t_star/sfe).
'''
# Declaration of the new timestep array
if not self.print_off:
print ('..Time refinement (long range)..')
new_dt = []
# For every timestep ...
for i_rs in range(0,len(self.history.timesteps)):
# Calculate the critical time delay
t_raf = self.t_SF_t[i_rs] / self.sfe
# If the step needs to be refined ...
if self.history.timesteps[i_rs] > t_raf:
# Calculate the number of remaining steps
nb_step_rem = len(self.history.timesteps) - i_rs
t_rem = 0.0
for i_rs_rem in range(i_rs, len(self.history.timesteps)):
t_rem += self.history.timesteps[i_rs_rem]
# Calculate the split factor
nb_split = int(t_rem / t_raf) + 1
# Split the step
for i_sp_st in range(0,nb_split):
new_dt.append(t_rem/nb_split)
# Quit the for loop
break
# If ok, don't change anything
else:
new_dt.append(self.history.timesteps[i_rs])
# Update the timestep information
self.nb_timesteps = len(new_dt)
self.history.timesteps = new_dt
# Update self.history.age
self.history.age = [0]
for ii in range(self.nb_timesteps):
self.history.age.append(self.history.age[-1] + new_dt[ii])
self.history.age = np.array(self.history.age)
# If a timestep needs to be added to be synchronized with
# the external program managing merger trees ...
if self.t_merge > 0.0:
# Find the interval where the step needs to be added
i_temp = 0
t_temp = new_dt[0]
while t_temp / self.t_merge < 0.9999999:
i_temp += 1
t_temp += new_dt[i_temp]
# Keep the t_merger index in memory
self.i_t_merger = i_temp
# Update/redeclare all the arrays (stable isotopes)
ymgal = self._get_iniabu()
self.len_ymgal = len(ymgal)
self.mdot, self.ymgal, self.ymgal_massive, self.ymgal_agb, \
self.ymgal_1a, self.ymgal_nsm, self.ymgal_bhnsm, \
self.ymgal_delayed_extra, self.mdot_massive, \
self.mdot_agb, self.mdot_1a, self.mdot_nsm, self.mdot_bhnsm, \
self.mdot_delayed_extra, \
self.sn1a_numbers, self.sn2_numbers, self.nsm_numbers, self.bhnsm_numbers,\
self.delayed_extra_numbers, self.imf_mass_ranges, \
self.imf_mass_ranges_contribution, self.imf_mass_ranges_mtot = \
self._get_storing_arrays(ymgal, len(self.history.isotopes))
# Update/redeclare all the arrays (unstable isotopes)
if self.len_decay_file > 0:
ymgal_radio = np.zeros(self.nb_radio_iso)
# Initialisation of the storing arrays for radioactive isotopes
self.mdot_radio, self.ymgal_radio, self.ymgal_massive_radio, \
self.ymgal_agb_radio, self.ymgal_1a_radio, self.ymgal_nsm_radio, \
self.ymgal_bhnsm_radio, self.ymgal_delayed_extra_radio, \
self.mdot_massive_radio, self.mdot_agb_radio, self.mdot_1a_radio, \
self.mdot_nsm_radio, self.mdot_bhnsm_radio,\
self.mdot_delayed_extra_radio, dummy, dummy, dummy, dummy, dummy, \
dummy, dummy, dummy = \
self._get_storing_arrays(ymgal_radio, self.nb_radio_iso)
# Recalculate the simulation time (used in chem_evol)
self.t_ce = []
self.t_ce.append(self.history.timesteps[0])
for i_init in range(1,self.nb_timesteps):
self.t_ce.append(self.t_ce[i_init-1] + self.history.timesteps[i_init])
##############################################
# Declare Evol Arrays #
##############################################
def __declare_evol_arrays(self):
'''
This function declares the arrays used to follow the evolution of the
galaxy regarding its growth and the exchange of gas with its surrounding.
'''
# Arrays with specific values at every timestep
self.sfr_input = np.zeros(self.nb_timesteps+1) # Star formation rate [Mo yr^-1]
self.m_DM_t = np.zeros(self.nb_timesteps+1) # Mass of the dark matter halo
self.r_vir_DM_t= np.zeros(self.nb_timesteps+1) # Virial radius of the dark matter halo
self.v_vir_DM_t= np.zeros(self.nb_timesteps+1) # Virial velocity of the halo
self.m_tot_ISM_t = np.zeros(self.nb_timesteps+1) # Mass of the ISM in gas
self.m_outflow_t = np.zeros(self.nb_timesteps) # Mass of the outflow at every timestep
self.eta_outflow_t = np.zeros(self.nb_timesteps) # Mass-loading factor == M_outflow / SFR
self.t_SF_t = np.zeros(self.nb_timesteps+1) # Star formation timescale at every timestep
self.m_crit_t = np.zeros(self.nb_timesteps+1) # Critical ISM mass below which there is no SFR
self.redshift_t = np.zeros(self.nb_timesteps+1) # Redshift associated to every timestep
self.m_inflow_t = np.zeros(self.nb_timesteps) # Mass of the inflow at every timestep
##############################################
# Initialize Gal Prop #
##############################################
def __initialize_gal_prop(self):
'''
This function sets the properties of the selected galaxy, such as its
SFH, its total mass, and its stellar mass.
'''
# No specific galaxy - Use input parameters
if self.galaxy == 'none':
#If an array is used for the SFH ..
if len(self.sfh_array) > 0:
self.__copy_sfr_array()
# If an input file is used for the SFH ...
elif not self.sfh_file == 'none':
self.__copy_sfr_input(self.sfh_file)
# If a star formation law is used in a closed box ...
elif self.cl_SF_law and not self.open_box:
self.__calculate_sfe_cl()
# If a random SFH is chosen ...
elif self.rand_sfh > 0.0:
self.__generate_rand_sfh()
# If the SFH is constant ...
else:
for i_cte_sfr in range(0, self.nb_timesteps+1):
self.sfr_input[i_cte_sfr] = self.cte_sfr
# Milky Way galaxy ...
elif self.galaxy == 'milky_way' or self.galaxy == 'milky_way_cte':
# Set the current dark and stellar masses (corrected for mass loss)
self.m_DM_0 = 1.0e12
self.stellar_mass_0 = 5.0e10
# Read Chiappini et al. (2001) SFH
if self.galaxy == 'milky_way':
self.__copy_sfr_input('stellab_data/milky_way_data/sfh_mw_cmr01.txt')
# Read constant SFH
else:
self.__copy_sfr_input('stellab_data/milky_way_data/sfh_cte.txt')
# Sculptor dwarf galaxy ...
elif self.galaxy == 'sculptor':
# Set the current dark and stellar masses (corrected for mass loss)
self.m_DM_0 = 1.5e9
self.stellar_mass_0 = 7.8e6
self.stellar_mass_0 = self.stellar_mass_0 * (1-self.mass_frac_SSP)
# Read deBoer et al. (2012) SFH
self.__copy_sfr_input('stellab_data/sculptor_data/sfh_deBoer12.txt')
# Fornax dwarf galaxy ...
elif self.galaxy == 'fornax':
# Set the current dark and stellar masses (corrected for mass loss)
self.m_DM_0 = 7.08e8
self.stellar_mass_0 = 4.3e7
self.stellar_mass_0 = self.stellar_mass_0 * (1-self.mass_frac_SSP)
# Read deBoer et al. (2012) SFH
self.__copy_sfr_input('stellab_data/fornax_data/sfh_fornax_deboer_et_al_2012.txt')
# Carina dwarf galaxy ...
elif self.galaxy == 'carina':
# Set the current dark and stellar masses (corrected for mass loss)
self.m_DM_0 = 3.4e6
self.stellar_mass_0 = 1.07e6
self.stellar_mass_0 = self.stellar_mass_0 * (1-self.mass_frac_SSP)
# Read deBoer et al. (2014) SFH
self.__copy_sfr_input('stellab_data/carina_data/sfh_deBoer14.txt')
# Interpolate the last timestep
if len(self.sfr_input) > 3:
aa = (self.sfr_input[-2] - self.sfr_input[-3])/\
self.history.timesteps[-2]
bb = self.sfr_input[-2]- (self.history.tend-self.history.timesteps[-1])*aa
self.sfr_input[-1] = aa*self.history.tend + bb
# Keep the SFH in memory
self.history.sfr_abs = self.sfr_input
##############################################
## Copy SFR Array ##
##############################################
def __copy_sfr_array(self):
'''
See copy_sfr_input() for more info.
'''
# Variable to keep track of OMEGA's timestep
i_dt_csa = 0
t_csa = 0.0
nb_dt_csa = self.nb_timesteps + 1
# Variable to keep track of the total stellar mass from the input SFH
m_stel_sfr_in = 0.0
# For every timestep given in the array (starting at the second step)
for i_csa in range(1,len(self.sfh_array)):
# Calculate the SFR interpolation coefficient
a_sfr = (self.sfh_array[i_csa][1] - self.sfh_array[i_csa-1][1]) / \
(self.sfh_array[i_csa][0] - self.sfh_array[i_csa-1][0])
b_sfr = self.sfh_array[i_csa][1] - a_sfr * self.sfh_array[i_csa][0]
# While we stay in the same time bin ...
while t_csa <= self.sfh_array[i_csa][0]:
# Interpolate the SFR
self.sfr_input[i_dt_csa] = a_sfr * t_csa + b_sfr
# Cumulate the stellar mass formed
m_stel_sfr_in += self.sfr_input[i_dt_csa] * \
self.history.timesteps[i_dt_csa]
# Exit the loop if the array is full
if i_dt_csa >= nb_dt_csa:
break
# Calculate the new time
t_csa += self.history.timesteps[i_dt_csa]
i_dt_csa += 1
# Exit the loop if the array is full
if (i_dt_csa + 1) >= nb_dt_csa:
break
# If the array has been read completely, but the sfr_input array is
# not full, fill the rest of the array with the last read value
if self.sfh_array[-1][1] == 0.0:
sfr_temp = 0.0
else:
sfr_temp = self.sfr_input[i_dt_csa-1]
while i_dt_csa < nb_dt_csa - 1:
self.sfr_input[i_dt_csa] = sfr_temp
m_stel_sfr_in += self.sfr_input[i_dt_csa] * \
self.history.timesteps[i_dt_csa]
t_csa += self.history.timesteps[i_dt_csa]
i_dt_csa += 1
# Normalise the SFR in order to be consistent with the integrated
# input star formation array (no mass loss considered!)
if self.sfh_array_norm > 0.0:
norm_sfr_in = self.sfh_array_norm / m_stel_sfr_in
for i_csa in range(0, nb_dt_csa):
self.sfr_input[i_csa] = self.sfr_input[i_csa] * norm_sfr_in
# Fill the missing last entry (extension of the last timestep, for tend)
# Since we don't know dt starting at tend, it is not part of m_stel_sfr_in
self.sfr_input[-1] = self.sfr_input[-2]
##############################################
## Calculate SFE Cl. ##
##############################################
def __calculate_sfe_cl(self):
'''
Calculate the star formation efficiency and the initial mass of gas
for a closed box model, given the final gas mass and the current
stellar mass.
'''
# Get the average return gas fraction of SSPs
if self.mass_frac_SSP == -1.0:
f_ej = 0.35
else:
f_ej = self.mass_frac_SSP
# If the gas-to-stellar mass ratio is the selected input ...
if self.r_gas_star > 0.0:
# Calculate the final mass of gas
self.m_gas_f = self.r_gas_star * self.stellar_mass_0
# Calculate the initial mass of gas
m_gas_ini = self.m_gas_f + self.stellar_mass_0
# If the final mass of gas is the selected input ...
elif self.m_gas_f > 0.0:
# Calculate the initial mass of gas
m_gas_ini = self.m_gas_f + self.stellar_mass_0
# If the initial mass of gas is the selected input ...
else:
# Use the input value for the initial mass of gas
m_gas_ini = self.mgal
# Calculate the final mass of gas
self.m_gas_f = m_gas_ini - self.stellar_mass_0
# Verify if the final mass of gas is negative
if self.m_gas_f < 0.0:
self.not_enough_gas = True
sfe_gcs = 1.0e-10
print ('!!Error - The final gas mass would be negative!!')
if not self.not_enough_gas:
# Scale the initial mass of all isotopes
scale_m_tot = m_gas_ini / np.sum(self.ymgal[0])
for k_cm in range(len(self.ymgal[0])):
self.ymgal[0][k_cm] = self.ymgal[0][k_cm] * scale_m_tot
# Initialization for finding the right SFE
sfe_gcs = 1.8e-10
sfe_max = 1.0
sfe_min = 0.0
m_gas_f_try = self.__get_m_gas_f(m_gas_ini, sfe_gcs, f_ej)
# While the SFE is not the right one ...
while abs(m_gas_f_try - self.m_gas_f) > 0.01:
# If the SFE needs to be increased ...
if (m_gas_f_try / self.m_gas_f) > 1.0:
# Set the lower limit of the SFE interval
sfe_min = sfe_gcs
# If an upper limit is already defined ...
if sfe_max < 1.0:
# Set the SFE to the middle point of the interval
sfe_gcs = (sfe_max + sfe_gcs) * 0.5
# If an upper limit is not already defined ...
else:
# Try a factor of 2
sfe_gcs = sfe_gcs * 2.0
# If the SFE needs to be decreased ...
else:
# Set the upper limit of the SFE interval
sfe_max = sfe_gcs
# If a lower limit is already defined ...
if sfe_min > 0.0:
# Set the SFE to the middle point of the interval
sfe_gcs = (sfe_min + sfe_gcs) * 0.5
# If a lower limit is not already defined ...
else:
# Try a factor of 2
sfe_gcs = sfe_gcs * 0.5
# Get the approximated final mass of gas
m_gas_f_try = self.__get_m_gas_f(m_gas_ini, sfe_gcs, f_ej)
# Keep the SFE in memory
self.sfe_gcs = sfe_gcs
##############################################
## Get M_gas_f ##
##############################################
def __get_m_gas_f(self, m_gas_ini, sfe_gcs, f_ej):
'''
Return the final mass of gas, given the initial mass of the gas
reservoir and the star formation efficiency. The function uses
a simple star formation law in the form of SFR(t) = sfe * M_gas(t)
'''
# Initialisation of the integration
m_gas_loop = m_gas_ini
t_gmgf = 0.0
# For every timestep ...
for i_gmgf in range(0,self.nb_timesteps):
# Calculate the new mass of gas
t_gmgf += self.history.timesteps[i_gmgf]
#self.sfr_input[i_gmgf] = sfe_gcs * m_gas_loop
m_gas_loop -= sfe_gcs * (1-f_ej) * m_gas_loop * \
self.history.timesteps[i_gmgf]
# Return the final mass of gas
return m_gas_loop
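# Illustrative note (not executed; assumed numbers): the loop above is an
# explicit Euler integration of dM_gas/dt = -sfe * (1 - f_ej) * M_gas, so for
# roughly constant timesteps it approximates
# M_gas(t) ~ m_gas_ini * exp(-sfe * (1 - f_ej) * t).
# For example, sfe = 1.0e-10 yr^-1, f_ej = 0.35 and t = 1.3e10 yr give a
# surviving gas fraction of about exp(-0.845) ~ 0.43.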
##############################################
# Copy SFR Input #
##############################################
def __copy_sfr_input(self, path_sfh_in):
'''
This function reads a SFH input file and interpolates its values so it
can be inserted in the array "sfr_input", which contains the SFR for each
OMEGA timestep.
Note
====
The input file does not need to have constant time step lengths, and
does not need to have the same number of timesteps as the number of
OMEGA timesteps.
Important
=========
In OMEGA and SYGMA, t += timestep[i] is the first thing done in the main
loop. The loop calculates what happened between the previous t and the
new t. This means the mass of stars formed must be SFR(previous t) *
timestep[i]. Therefore, sfr_input[i] IS NOT the SFR at time t +=
timestep[i], but rather the SFR at previous time which is used for the
current step i.
Argument
========
path_sfh_in : Path of the input SFH file.
'''
# Variable to keep track of the OMEGA timestep
nb_dt_csi = self.nb_timesteps + 1
i_dt_csi = 0
t_csi = 0.0 # Not timesteps[0] because sfr_input[0] must be
# used from t = 0 to t = timesteps[0]
# Variable to keep track of the total stellar mass from the input SFH
m_stel_sfr_in = 0.0
# Open the file containing the SFR vs time
with open(os.path.join(nupy_path, path_sfh_in), 'r') as sfr_file:
# Read the first line (col 0 : t, col 1 : SFR)
line_1_str = sfr_file.readline()
parts_1 = [float(x) for x in line_1_str.split()]
# For every remaining line ...
for line_2_str in sfr_file:
# Extract data
parts_2 = [float(x) for x in line_2_str.split()]
# Calculate the interpolation coefficients (SFR = a*t + b)
a_csi = (parts_2[1] - parts_1[1]) / (parts_2[0] - parts_1[0])
b_csi = parts_1[1] - a_csi * parts_1[0]
# While we stay in the same time bin ...
while t_csi <= parts_2[0]:
# Calculate the right SFR for the specific OMEGA timestep
#self.sfr_input[i_dt_csi] = a_csi * t_csi + b_csi
# Calculate the average SFR for the specific OMEGA timestep
if i_dt_csi < self.nb_timesteps:
self.sfr_input[i_dt_csi] = a_csi * (t_csi + \
self.history.timesteps[i_dt_csi] * 0.5) + b_csi
else:
self.sfr_input[i_dt_csi] = a_csi * t_csi + b_csi
# Cumulate the mass of stars formed
if i_dt_csi < nb_dt_csi - 1:
m_stel_sfr_in += self.sfr_input[i_dt_csi] * \
self.history.timesteps[i_dt_csi]
# Calculate the new time
t_csi += self.history.timesteps[i_dt_csi]
# Go to the next time step
i_dt_csi += 1
# Exit the loop if the array is full
if i_dt_csi >= nb_dt_csi:
break
# Exit the loop if the array is full
if i_dt_csi >= nb_dt_csi:
break
# Copy the last line read
parts_1 = copy.copy(parts_2)
# Close the file
sfr_file.close()
# If the file has been read completely, but the sfr_input array is
# not full, fill the rest of the array with the last read value
while i_dt_csi < nb_dt_csi:
self.sfr_input[i_dt_csi] = self.sfr_input[i_dt_csi-1]
if i_dt_csi < nb_dt_csi - 1:
m_stel_sfr_in += self.sfr_input[i_dt_csi] * \
self.history.timesteps[i_dt_csi]
i_dt_csi += 1
# Normalise the SFR in order to be consistent with the input current
# stellar mass (if the stellar mass is known)
if self.stellar_mass_0 > 0.0:
norm_sfr_in = self.stellar_mass_0 / ((1-self.mass_frac_SSP) * m_stel_sfr_in)
for i_csi in range(0, nb_dt_csi):
self.sfr_input[i_csi] = self.sfr_input[i_csi] * norm_sfr_in
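# Illustrative note (assumed numbers, not part of the code): with the Milky Way
# defaults stellar_mass_0 = 5.0e10 Msun and mass_frac_SSP = 0.35, the input SFH
# would be rescaled so that the integrated star formation is
# 5.0e10 / (1 - 0.35) ~ 7.7e10 Msun, of which ~35% is later returned to the gas.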
##############################################
# Generate Rand SFH #
##############################################
def __generate_rand_sfh(self):
'''
This function generates a random SFH. It should only be used for
testing purposes, in order to examine how the uncertainty associated
with the SFH affects the results.
The self.rand_sfh parameter sets the maximum ratio between the maximum
and the minimum values of the SFR. It controls how "bursty" or constant
a SFH is. self.rand_sfh = 1 means a constant SFH.
'''
# Variable to keep track of the total stellar mass from the random SFH
m_stel_sfr_in = 0.0
# For each timestep
for i_csi in range(0,self.nb_timesteps+1):
self.sfr_input[i_csi] = random.randrange(1,self.rand_sfh+1)
# Cumulate the total mass of stars formed
if i_csi < self.nb_timesteps:
m_stel_sfr_in += self.sfr_input[i_csi] * \
self.history.timesteps[i_csi]
# Normalise the SFR in order to be consistent with the input
# current stellar mass
norm_sfr_in = self.stellar_mass_0 / ((1-self.mass_frac_SSP) * m_stel_sfr_in)
for i_csi in range(0, self.nb_timesteps+1):
self.sfr_input[i_csi] = self.sfr_input[i_csi] * norm_sfr_in
##############################################
# Fill Evol Arrays #
##############################################
def __fill_evol_arrays(self):
'''
This function fills the arrays used to follow the evolution of the
galaxy regarding its growth and the exchange of gas with its surrounding.
'''
# Execute this function only if needed
if self.in_out_control or self.SF_law or self.DM_evolution:
# Calculate the redshift for every timestep, if needed
self.calculate_redshift_t()
# Calculate the mass of the dark matter halo at every timestep
self.__calculate_m_DM_t()
# Calculate the virial radius and velocity at every timestep
self.calculate_virial()
# Calculate the critical mass, below which there is no SFR, at every dt
self.__calculate_m_crit_t()
# Calculate the star formation timescale at every timestep
self.__calculate_t_SF_t()
# Calculate the gas mass of the ISM at every timestep
self.__calculate_m_tot_ISM_t()
# Calculate the mass-loading factor and outflow mass at every timestep
self.__calculate_outflow_t()
##############################################
# Get t From z #
##############################################
def __get_t_from_z(self, z_gttfz):
'''
This function returns the age of the Universe at a given redshift.
Argument
========
z_gttfz : Redshift that needs to be converted into age.
'''
# Return the age of the Universe
temp_var = math.sqrt((self.lambda_0/self.omega_0)/(1.0+z_gttfz)**3)
x_var = math.log( temp_var + math.sqrt( temp_var**2 + 1.0 ) )
return 2.0 / ( 3.0 * self.H_0 * math.sqrt(self.lambda_0)) * \
x_var * 9.77793067e11
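# Descriptive note: this is the flat Lambda-CDM age-redshift relation
# t(z) = 2 / (3 * H_0 * sqrt(lambda_0)) * asinh( sqrt(lambda_0/omega_0) * (1+z)**-1.5 ),
# written with asinh(x) = ln(x + sqrt(x**2 + 1)); the constant 9.77793067e11
# converts 1 / [km s^-1 Mpc^-1] into years.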
##############################################
# Get z From t #
##############################################
def __get_z_from_t(self, t_gtzft):
'''
This function returns the redshift of a given Universe age.
Argument
========
t_gtzft : Age of the Universe that needs to be converted into redshift.
'''
# Return the redshift
temp_var = 1.5340669e-12 * self.lambda_0**0.5 * self.H_0 * t_gtzft
return (self.lambda_0 / self.omega_0)**0.3333333 / \
math.sinh(temp_var)**0.66666667 - 1.0
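# Descriptive note: this is the inverse of __get_t_from_z, i.e.
# 1 + z = (lambda_0/omega_0)**(1/3) / sinh( 1.5 * H_0 * sqrt(lambda_0) * t )**(2/3),
# where 1.5340669e-12 ~ 1.5 / 9.77793067e11 converts t from years back to
# [km s^-1 Mpc^-1]^-1 units.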
##############################################
# Calculate redshift(t) #
##############################################
def calculate_redshift_t(self):
'''
This function calculates the redshift associated to every timestep
assuming that 'tend' represents redshift zero.
'''
# Calculate the current age of the Universe (LambdaCDM - z = 0)
current_age_czt = self.__get_t_from_z(self.redshift_f)
# Calculate the age of the Universe when the galaxy forms
age_formation_czt = current_age_czt - self.history.tend
# Initiate the age of the galaxy
t_czt = 0.0
# Initialize the linear interpolation coefficients
self.redshift_t_coef = np.zeros((self.nb_timesteps,2))
#For each timestep
for i_czt in range(0, self.nb_timesteps+1):
#Calculate the age of the Universe at that time [yr]
age_universe_czt = age_formation_czt + t_czt
#Calculate the redshift at that time
self.redshift_t[i_czt] = self.__get_z_from_t(age_universe_czt)
# Calculate the interpolation coefficients
# z = self.redshift_t_coef[0] * t + self.redshift_t_coef[1]
if i_czt > 0:
self.redshift_t_coef[i_czt-1][0] = \
(self.redshift_t[i_czt]-self.redshift_t[i_czt-1]) / \
self.history.timesteps[i_czt-1]
self.redshift_t_coef[i_czt-1][1] = self.redshift_t[i_czt] - \
self.redshift_t_coef[i_czt-1][0] * t_czt
#Update the age of the galaxy [yr]
if i_czt < self.nb_timesteps:
t_czt += self.history.timesteps[i_czt]
#Correction for floating-point rounding error (e.g. z = -2.124325345e-8)
if self.redshift_t[-1] < 0.0:
self.redshift_t[-1] = 0.0
##############################################
# Run All SSPs #
##############################################
def __run_all_ssps(self):
'''
Create a SSP with SYGMA for each metallicity available in the yield tables.
Each SSP has a total mass of 1 Msun, so it can easily be re-normalized.
'''
# Copy the metallicities and put them in increasing order
self.Z_table_SSP = copy.copy(self.ytables.metallicities)
self.Z_table_first_nzero = min(self.Z_table_SSP)
if self.popIII_info_fast and self.iniZ <= 0.0 and self.Z_trans > 0.0:
self.Z_table_SSP.append(0.0)
self.Z_table_SSP = sorted(self.Z_table_SSP)
self.nb_Z_table_SSP = len(self.Z_table_SSP)
# If the SSPs are not given as an input ..
if len(self.SSPs_in) == 0:
# Define the SSP timesteps
len_dt_SSPs = len(self.dt_in_SSPs)
if len_dt_SSPs == 0:
dt_in_ras = self.history.timesteps
len_dt_SSPs = self.nb_timesteps
else:
dt_in_ras = self.dt_in_SSPs
# Declare the SSP ejecta arrays [Z][dt][iso]
self.ej_SSP = np.zeros((self.nb_Z_table_SSP,len_dt_SSPs,self.nb_isotopes))
if self.len_decay_file > 0:
self.ej_SSP_radio = \
np.zeros((self.nb_Z_table_SSP,len_dt_SSPs,self.nb_radio_iso))
# For each metallicity ...
for i_ras in range(0,self.nb_Z_table_SSP):
# Use a dummy iniabu file if the metallicity is not zero
if self.Z_table_SSP[i_ras] == 0:
iniabu_t = ''
hardsetZ2 = self.hardsetZ
else:
iniabu_t='yield_tables/iniabu/iniab2.0E-02GN93.ppn'
hardsetZ2 = self.Z_table_SSP[i_ras]
# Run a SYGMA simulation (1 Msun SSP)
sygma_inst = sygma.sygma(pre_calculate_SSPs=False, \
imf_type=self.imf_type, alphaimf=self.alphaimf, \
imf_bdys=self.history.imf_bdys, sn1a_rate=self.history.sn1a_rate, \
iniZ=self.Z_table_SSP[i_ras], dt=self.history.dt, \
special_timesteps=self.special_timesteps, \
nsmerger_bdys=self.nsmerger_bdys, tend=self.history.tend, \
mgal=1.0, transitionmass=self.transitionmass, \
table=self.table, hardsetZ=hardsetZ2, \
sn1a_on=self.sn1a_on, sn1a_table=self.sn1a_table, \
sn1a_energy=self.sn1a_energy, ns_merger_on=self.ns_merger_on, \
bhns_merger_on=self.bhns_merger_on, f_binary=self.f_binary, \
f_merger=self.f_merger, t_merger_max=self.t_merger_max, \
m_ej_nsm=self.m_ej_nsm, nsm_dtd_power=self.nsm_dtd_power,\
m_ej_bhnsm=self.m_ej_bhnsm, bhnsmerger_table=self.bhnsmerger_table, \
nsmerger_table=self.nsmerger_table, iniabu_table=iniabu_t, \
extra_source_on=self.extra_source_on, nb_nsm_per_m=self.nb_nsm_per_m, \
t_nsm_coal=self.t_nsm_coal, extra_source_table=self.extra_source_table, \
f_extra_source=self.f_extra_source, \
extra_source_mass_range=self.extra_source_mass_range, \
extra_source_exclude_Z=self.extra_source_exclude_Z, \
pop3_table=self.pop3_table, imf_bdys_pop3=self.imf_bdys_pop3, \
imf_yields_range_pop3=self.imf_yields_range_pop3, \
starbursts=self.starbursts, beta_pow=self.beta_pow, \
gauss_dtd=self.gauss_dtd, exp_dtd=self.exp_dtd, \
nb_1a_per_m=self.nb_1a_per_m, direct_norm_1a=self.direct_norm_1a, \
imf_yields_range=self.imf_yields_range, \
exclude_masses=self.exclude_masses, netyields_on=self.netyields_on, \
wiersmamod=self.wiersmamod, yield_interp=self.yield_interp, \
stellar_param_on=self.stellar_param_on, \
t_dtd_poly_split=self.t_dtd_poly_split, \
stellar_param_table=self.stellar_param_table, \
tau_ferrini=self.tau_ferrini, delayed_extra_log=self.delayed_extra_log, \
dt_in=dt_in_ras, nsmerger_dtd_array=self.nsmerger_dtd_array, \
bhnsmerger_dtd_array=self.bhnsmerger_dtd_array, \
poly_fit_dtd_5th=self.poly_fit_dtd_5th, \
poly_fit_range=self.poly_fit_range, \
delayed_extra_dtd=self.delayed_extra_dtd, \
delayed_extra_dtd_norm=self.delayed_extra_dtd_norm, \
delayed_extra_yields=self.delayed_extra_yields, \
delayed_extra_yields_norm=self.delayed_extra_yields_norm, \
table_radio=self.table_radio, decay_file=self.decay_file, \
sn1a_table_radio=self.sn1a_table_radio, \
bhnsmerger_table_radio=self.bhnsmerger_table_radio, \
nsmerger_table_radio=self.nsmerger_table_radio, \
ism_ini_radio=self.ism_ini_radio, \
delayed_extra_yields_radio=self.delayed_extra_yields_radio, \
delayed_extra_yields_norm_radio=self.delayed_extra_yields_norm_radio, \
ytables_radio_in=self.ytables_radio_in, radio_iso_in=self.radio_iso_in, \
ytables_1a_radio_in=self.ytables_1a_radio_in, \
ytables_nsmerger_radio_in=self.ytables_nsmerger_radio_in)
# Copy the ejecta arrays from the SYGMA simulation
self.ej_SSP[i_ras] = sygma_inst.mdot
if self.len_decay_file > 0:
self.ej_SSP_radio[i_ras] = sygma_inst.mdot_radio
# If this is the last Z entry ..
if i_ras == self.nb_Z_table_SSP - 1:
# Keep in memory the number of timesteps in SYGMA
self.nb_steps_table = len(sygma_inst.history.timesteps)
self.dt_ssp = sygma_inst.history.timesteps
# Keep the time of ssp in memory
self.t_ssp = np.zeros(self.nb_steps_table)
self.t_ssp[0] = self.dt_ssp[0]
for i_ras in range(1,self.nb_steps_table):
self.t_ssp[i_ras] = self.t_ssp[i_ras-1] + self.dt_ssp[i_ras]
# Clear the memory
del sygma_inst
# Calculate the interpolation coefficients (between metallicities)
self.__calculate_int_coef()
# If the SSPs are given as an input ..
else:
# Copy the SSPs
self.ej_SSP = self.SSPs_in[0]
self.nb_steps_table = len(self.ej_SSP[0])
self.ej_SSP_coef = self.SSPs_in[1]
self.dt_ssp = self.SSPs_in[2]
self.t_ssp = self.SSPs_in[3]
if len(self.SSPs_in) > 4:
self.ej_SSP_radio = self.SSPs_in[4]
self.ej_SSP_coef_radio = self.SSPs_in[5]
self.decay_info = self.SSPs_in[6]
self.len_decay_file = self.SSPs_in[7]
self.nb_radio_iso = len(self.decay_info)
self.nb_new_radio_iso = len(self.decay_info)
del self.SSPs_in
##############################################
# Calculate Int. Coef. #
##############################################
def __calculate_int_coef(self):
'''
Calculate the interpolation coefficients of each isotope between the
different metallicities for every timestep considered in the SYGMA
simulations. ejecta = a * log(Z) + b
self.ej_SSP_coef[0 -> a, 1 -> b][Z][step][isotope], where the Z index refers
to the lower metallicity boundary of the interpolation.
'''
# Declare the interpolation coefficients arrays
self.ej_SSP_coef = \
np.zeros((2,self.nb_Z_table_SSP,self.nb_steps_table,self.nb_isotopes))
if self.len_decay_file > 0:
self.ej_SSP_coef_radio = \
np.zeros((2,self.nb_Z_table_SSP,self.nb_steps_table,self.nb_radio_iso))
# For each metallicity interval ...
for i_cic in range(0,self.nb_Z_table_SSP-1):
# If the metallicity is not zero ...
if not self.Z_table_SSP[i_cic] == 0.0:
# Calculate the log(Z) for the boundary metallicities
logZ_low = np.log10(self.Z_table_SSP[i_cic])
logZ_up = np.log10(self.Z_table_SSP[i_cic+1])
dif_logZ_inv = 1.0 / (logZ_up - logZ_low)
# For each step ...
for j_cic in range(0,self.nb_steps_table):
# For every stable isotope ..
for k_cic in range(0,self.nb_isotopes):
# Copy the isotope mass for the boundary metallicities
iso_low = self.ej_SSP[i_cic][j_cic][k_cic]
iso_up = self.ej_SSP[i_cic+1][j_cic][k_cic]
# Calculate the "a" and "b" coefficients
self.ej_SSP_coef[0][i_cic][j_cic][k_cic] = \
(iso_up - iso_low) * dif_logZ_inv
self.ej_SSP_coef[1][i_cic][j_cic][k_cic] = iso_up - \
self.ej_SSP_coef[0][i_cic][j_cic][k_cic] * logZ_up
# For every radioactive isotope ..
if self.len_decay_file > 0:
for k_cic in range(0,self.nb_radio_iso):
# Copy the isotope mass for the boundary metallicities
iso_low = self.ej_SSP_radio[i_cic][j_cic][k_cic]
iso_up = self.ej_SSP_radio[i_cic+1][j_cic][k_cic]
# Calculate the "a" and "b" coefficients
self.ej_SSP_coef_radio[0][i_cic][j_cic][k_cic] = \
(iso_up - iso_low) * dif_logZ_inv
self.ej_SSP_coef_radio[1][i_cic][j_cic][k_cic] = iso_up - \
self.ej_SSP_coef_radio[0][i_cic][j_cic][k_cic] * logZ_up
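# Descriptive note: with these coefficients, the SSP ejecta at an arbitrary
# metallicity Z (between the tabulated values Z_low and Z_up) can later be
# reconstructed as
# ej(Z) = ej_SSP_coef[0][i_Z_low][step][iso] * log10(Z) + ej_SSP_coef[1][i_Z_low][step][iso],
# i.e. a linear interpolation in log10(Z) between two tabulated SSPs.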
##############################################
# Calculate M_DM(t) #
##############################################
def __calculate_m_DM_t(self):
'''
This function calculates the mass of the dark matter halo at each
timestep.
'''
# If the mass of the dark matter halo is kept at a constant value ...
if not self.DM_evolution:
# If the dark matter evolution is an input array ...
if len(self.DM_array) > 0:
# Copy the input values
self.copy_DM_input()
# Use the current value for every timestep
else:
for i_cmdt in range(0, self.nb_timesteps+1):
self.m_DM_t[i_cmdt] = self.m_DM_0
# If the mass of the dark matter halo evolves with time ...
else:
# If the dark matter evolution is an input array ...
if len(self.DM_array) > 0:
# Copy the input values
self.copy_DM_input()
# If the dark matter evolution is taken from Millennium simulations ...
else:
# Calculate the polynomial coefficient for the evolution of
# the dark matter mass
poly_up_dm, poly_low_dm = self.__get_DM_bdy()
# For each timestep ...
for i_cmdt in range(0, self.nb_timesteps+1):
# Calculate the lower and upper dark matter mass boundaries
log_m_dm_up = poly_up_dm[0] * self.redshift_t[i_cmdt]**3 + \
poly_up_dm[1] * self.redshift_t[i_cmdt]**2 + poly_up_dm[2] * \
self.redshift_t[i_cmdt] + poly_up_dm[3]
log_m_dm_low = poly_low_dm[0] * self.redshift_t[i_cmdt]**3 + \
poly_low_dm[1] * self.redshift_t[i_cmdt]**2 + poly_low_dm[2]*\
self.redshift_t[i_cmdt] + poly_low_dm[3]
# If the dark matter mass is too low for the available fit ...
if self.DM_too_low:
# Scale the fit using the current input mass
self.m_DM_t[i_cmdt] = 10**log_m_dm_low * \
self.m_DM_0 / 10**poly_low_dm[3]
# If the dark matter mass can be interpolated
else:
# Use a linear interpolation with the log of the mass
a = (log_m_dm_up - log_m_dm_low) / \
(poly_up_dm[3] - poly_low_dm[3])
b = log_m_dm_up - a * poly_up_dm[3]
self.m_DM_t[i_cmdt] = 10**( a * math.log10(self.m_DM_0) + b )
# If the simulation does not stop at redshift zero ...
if not self.redshift_f == 0.0:
# Scale the DM mass (because the fits look at M_DM_0 at z=0)
m_dm_scale = self.m_DM_0 / self.m_DM_t[-1]
for i_cmdt in range(0, self.nb_timesteps+1):
self.m_DM_t[i_cmdt] = self.m_DM_t[i_cmdt] * m_dm_scale
# Create the interpolation coefficients
# M_DM = self.m_DM_t_coef[0] * t + self.m_DM_t_coef[1]
self.m_DM_t_coef = np.zeros((self.nb_timesteps,2))
for i_cmdt in range(0, self.nb_timesteps):
self.m_DM_t_coef[i_cmdt][0] = (self.m_DM_t[i_cmdt+1] - \
self.m_DM_t[i_cmdt]) / self.history.timesteps[i_cmdt]
self.m_DM_t_coef[i_cmdt][1] = self.m_DM_t[i_cmdt] - \
self.m_DM_t_coef[i_cmdt][0] * self.history.age[i_cmdt]
##############################################
# Copy DM Input #
##############################################
def copy_DM_input(self):
'''
This function interpolates the DM masses from an input array
and adds the masses to the corresponding OMEGA steps
'''
# Variable to keep track of OMEGA's timestep
i_dt_csa = 0
t_csa = 0.0
nb_dt_csa = self.nb_timesteps
# If just one entry ...
if len(self.DM_array) == 1:
self.m_DM_t[i_dt_csa] = self.DM_array[0][1]
i_dt_csa += 1
# If DM values need to be interpolated ...
else:
# For every timestep given in the array (starting at the second step)
for i_csa in range(1,len(self.DM_array)):
# Calculate the DM interpolation coefficient
a_DM = (self.DM_array[i_csa][1] - self.DM_array[i_csa-1][1]) / \
(self.DM_array[i_csa][0] - self.DM_array[i_csa-1][0])
b_DM = self.DM_array[i_csa][1] - a_DM * self.DM_array[i_csa][0]
# While we stay in the same time bin ...
while t_csa <= self.DM_array[i_csa][0]:
# Interpolate the DM mass
self.m_DM_t[i_dt_csa] = a_DM * t_csa + b_DM
# Exit the loop if the array is full
if i_dt_csa >= nb_dt_csa:
break
# Calculate the new time
t_csa += self.history.timesteps[i_dt_csa]
i_dt_csa += 1
# Exit the loop if the array is full
if i_dt_csa >= nb_dt_csa:
break
# If the array has been read completely, but the DM array is
# not full, fill the rest of the array with the last input value
while i_dt_csa < nb_dt_csa+1:
self.m_DM_t[i_dt_csa] = self.DM_array[-1][1]
#self.m_DM_t[i_dt_csa] = self.m_DM_t[i_dt_csa-1]
i_dt_csa += 1
##############################################
# Copy R_vir Input #
##############################################
def copy_r_vir_input(self):
'''
This function interpolates the R_vir from an input array
and adds the radius to the corresponding OMEGA steps
'''
# Variable to keep track of OMEGA's timestep
i_dt_csa = 0
t_csa = 0.0
nb_dt_csa = self.nb_timesteps
# If just one entry ...
if len(self.r_vir_array) == 1:
self.r_vir_DM_t[i_dt_csa] = self.r_vir_array[0][1]
i_dt_csa += 1
# If r_vir values need to be interpolated ...
else:
# For every timestep given in the array (starting at the second step)
for i_csa in range(1,len(self.r_vir_array)):
# Calculate the R_vir interpolation coefficient
a_r_vir = (self.r_vir_array[i_csa][1] - self.r_vir_array[i_csa-1][1]) / \
(self.r_vir_array[i_csa][0] - self.r_vir_array[i_csa-1][0])
b_r_vir = self.r_vir_array[i_csa][1] - a_r_vir * self.r_vir_array[i_csa][0]
# While we stay in the same time bin ...
while t_csa <= self.r_vir_array[i_csa][0]:
# Interpolate r_vir
self.r_vir_DM_t[i_dt_csa] = a_r_vir * t_csa + b_r_vir
# Exit the loop if the array is full
if i_dt_csa >= nb_dt_csa:
break
# Calculate the new time
t_csa += self.history.timesteps[i_dt_csa]
i_dt_csa += 1
# Exit the loop if the array is full
if i_dt_csa >= nb_dt_csa:
break
# If the array has been read completely, but the r_vir array is
# not full, fill the rest of the array with the last input value
while i_dt_csa < nb_dt_csa+1:
self.r_vir_DM_t[i_dt_csa] = self.r_vir_array[-1][1]
#self.r_vir_DM_t[i_dt_csa] = self.r_vir_DM_t[i_dt_csa-1]
i_dt_csa += 1
# Create the interpolation coefficients
# R_vir = self.r_vir_DM_t_coef[0] * t + self.r_vir_DM_t_coef[1]
self.r_vir_DM_t_coef = np.zeros((self.nb_timesteps,2))
for i_cmdt in range(0, self.nb_timesteps):
self.r_vir_DM_t_coef[i_cmdt][0] = (self.r_vir_DM_t[i_cmdt+1] - \
self.r_vir_DM_t[i_cmdt]) / self.history.timesteps[i_cmdt]
self.r_vir_DM_t_coef[i_cmdt][1] = self.r_vir_DM_t[i_cmdt] - \
self.r_vir_DM_t_coef[i_cmdt][0] * self.history.age[i_cmdt]
##############################################
# Get DM Bdy #
##############################################
# Return the fit coefficients for the interpolation of the dark matter mass
def __get_DM_bdy(self):
'''
This function calculates and returns the fit coefficients for the
interpolation of the evolution of the dark matter mass as a function
of time.
'''
# Open the file containing the coefficient of the 3rd order polynomial fit
with open(os.path.join(nupy_path, "m_dm_evolution", "poly3_fits.txt"),\
'r') as m_dm_file:
# Read the first line
line_str = m_dm_file.readline()
parts_1 = [float(x) for x in line_str.split()]
# If the input dark matter mass is higher than the ones provided
# by the fits ...
if math.log10(self.m_DM_0) > parts_1[3]:
# Use the highest dark matter mass available
parts_2 = copy.copy(parts_1)
print ('Warning - Current dark matter mass too high for ' \
'the available fits.')
# If the input dark matter mass is in the available range ...
# Find the mass boundary for the interpolation.
else:
# For every remaining line ...
for line_str in m_dm_file:
# Extract data
parts_2 = [float(x) for x in line_str.split()]
# If the read mass is lower than the input dark matter mass
if math.log10(self.m_DM_0) > parts_2[3]:
# Exit the loop and use the current interpolation boundary
break
# Copy the current read line
parts_1 = copy.copy(parts_2)
# Keep track if the input dark matter mass is too low ...
if parts_1[3] == parts_2[3]:
self.DM_too_low = True
#Close the file
m_dm_file.close()
return parts_1, parts_2
##############################################
# Calculate Virial #
##############################################
def calculate_virial(self):
# If R_vir needs to be calculated ..
if len(self.r_vir_array) == 0:
# Average current mass density of the Universe [Mo Mpc^-3]
rho_0_uni = 3.7765e10
# For each timestep ...
for i_cv in range(0,len(self.history.timesteps)+1):
# Calculate the virial radius of the dark matter halo [kpc]
self.r_vir_DM_t[i_cv] = 1.0e3 * 0.106078 * \
(self.m_DM_t[i_cv] / rho_0_uni)**0.3333333 / \
(1 + self.redshift_t[i_cv])
# If R_vir is provided as an input ..
else:
# Use the input array and synchronize the timesteps
self.copy_r_vir_input()
# For each timestep ...
for i_cv in range(0,len(self.history.timesteps)+1):
#Calculate the virial velocity of the dark matter "particles" [km/s]
self.v_vir_DM_t[i_cv] = ( 4.302e-6 * self.m_DM_t[i_cv] / \
self.r_vir_DM_t[i_cv] )** 0.5
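# Descriptive note on the constants above (best-guess interpretation):
# 0.106078 ~ (3 / (800*pi))**(1/3), so r_vir appears to be the radius enclosing
# an overdensity of ~200 times the mean density rho_0_uni at redshift z
# (hence the 1/(1+z) factor), and 4.302e-6 is G in kpc Msun^-1 (km/s)^2,
# giving v_vir = sqrt(G * M_DM / r_vir).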
# Create the interpolation coefficients
# R_vir = self.r_vir_DM_t_coef[0] * t + self.r_vir_DM_t_coef[1]
self.r_vir_DM_t_coef = np.zeros((self.nb_timesteps,2))
for i_cmdt in range(0, self.nb_timesteps):
self.r_vir_DM_t_coef[i_cmdt][0] = (self.r_vir_DM_t[i_cmdt+1] - \
self.r_vir_DM_t[i_cmdt]) / self.history.timesteps[i_cmdt]
self.r_vir_DM_t_coef[i_cmdt][1] = self.r_vir_DM_t[i_cmdt] - \
self.r_vir_DM_t_coef[i_cmdt][0] * self.history.age[i_cmdt]
# Create the interpolation coefficients
# v_vir = self.v_vir_DM_t_coef[0] * t + self.v_vir_DM_t_coef[1]
self.v_vir_DM_t_coef = np.zeros((self.nb_timesteps,2))
for i_cmdt in range(0, self.nb_timesteps):
self.v_vir_DM_t_coef[i_cmdt][0] = (self.v_vir_DM_t[i_cmdt+1] - \
self.v_vir_DM_t[i_cmdt]) / self.history.timesteps[i_cmdt]
self.v_vir_DM_t_coef[i_cmdt][1] = self.v_vir_DM_t[i_cmdt] - \
self.v_vir_DM_t_coef[i_cmdt][0] * self.history.age[i_cmdt]
##############################################
# Calculate M_crit_t #
##############################################
def __calculate_m_crit_t(self):
# Calculate the real constant
# m_crit_final = self.norm_crit_m * (0.1/2000.0) * \
# (self.v_vir_DM_t[-1] * self.r_vir_DM_t[-1])
# the_constant = m_crit_final / ((0.1/2000.0) * \
# (self.v_vir_DM_t[-1] * self.r_vir_DM_t[-1])**self.beta_crit)
the_constant = self.norm_crit_m
#For each timestep ...
for i_ctst in range(0,len(self.history.timesteps)+1):
# If m_crit_t is wanted ...
if self.m_crit_on:
# Calculate the critical mass (Croton et al. 2006 .. modified)
self.m_crit_t[i_ctst] = the_constant * (0.1/2000.0) * \
(self.v_vir_DM_t[i_ctst] * self.r_vir_DM_t[i_ctst])**self.beta_crit
# If m_crit_t is not wanted ...
else:
# Set the critical mass to zero
self.m_crit_t[i_ctst] = 0.0
# Create the interpolation coefficients
# M_crit = self.m_crit_t_coef[0] * t + self.m_crit_t_coef[1]
self.m_crit_t_coef = np.zeros((self.nb_timesteps,2))
for i_cmdt in range(0, self.nb_timesteps):
self.m_crit_t_coef[i_cmdt][0] = (self.m_crit_t[i_cmdt+1] - \
self.m_crit_t[i_cmdt]) / self.history.timesteps[i_cmdt]
self.m_crit_t_coef[i_cmdt][1] = self.m_crit_t[i_cmdt] - \
self.m_crit_t_coef[i_cmdt][0] * self.history.age[i_cmdt]
##############################################
# Calculate t_SF(t) #
##############################################
def __calculate_t_SF_t(self):
'''
This function calculates the star formation timescale at every timestep.
'''
# Execute this function only if needed
if self.SF_law or self.DM_evolution:
# If the star formation timescale is kept constant ...
if self.t_star > 0:
# Use the same value for every timestep
for i_ctst in range(0, self.nb_timesteps+1):
self.t_SF_t[i_ctst] = self.t_star
# If the timescale follows the halo dynamical time ...
else:
# Set the timescale to a fraction of the halo dynamical time
# See White & Frenk (1991); Springel et al. (2001)
for i_ctst in range(0, self.nb_timesteps+1):
# If the dark matter mass is evolving ...
if self.DM_evolution:
self.t_SF_t[i_ctst] = self.f_dyn * 0.1 * (1 + \
self.redshift_t[i_ctst])**((-1.5)*self.t_sf_z_dep) \
/ self.H_0 * 9.7759839e11
# If the dark matter mass is not evolving ...
else:
self.t_SF_t[i_ctst] = self.f_dyn * 0.1 / self.H_0 * \
9.7759839e11
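# Descriptive note: this follows t_SF ~ f_dyn * 0.1 / H(z), with
# H(z) ~ H_0 * (1+z)**1.5 during matter domination (recovered when
# t_sf_z_dep = 1); the constant 9.7759839e11 converts 1 / [km s^-1 Mpc^-1]
# into years.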
# Create the interpolation coefficients
# SF_t = self.t_SF_t_coef[0] * t + self.t_SF_t_coef[1]
self.t_SF_t_coef = np.zeros((self.nb_timesteps,2))
for i_cmdt in range(0, self.nb_timesteps):
self.t_SF_t_coef[i_cmdt][0] = (self.t_SF_t[i_cmdt+1] - \
self.t_SF_t[i_cmdt]) / self.history.timesteps[i_cmdt]
self.t_SF_t_coef[i_cmdt][1] = self.t_SF_t[i_cmdt] - \
self.t_SF_t_coef[i_cmdt][0] * self.history.age[i_cmdt]
##############################################
# Calculate M_tot ISM(t) #
##############################################
def __calculate_m_tot_ISM_t(self):
'''
This function calculates the mass of the gas reservoir at every
timestep using a classical star formation law.
'''
# If the evolution of the mass of the ISM is an input ...
if len(self.m_tot_ISM_t_in) > 0:
# Copy and adjust the input array for the OMEGA timesteps
self.__copy_m_tot_ISM_input()
# If the ISM has a constant mass ...
elif self.cte_m_gas > 0.0:
# For each timestep ...
for i_cm in range(0, self.nb_timesteps+1):
self.m_tot_ISM_t[i_cm] = self.cte_m_gas
# If the mass of gas is tied to the SFH ...
elif self.SF_law or self.DM_evolution:
# For each timestep ...
for i_cm in range(0, self.nb_timesteps+1):
# If it's the last timestep ... use the previous sfr_input
if i_cm == self.nb_timesteps:
# Calculate the total mass of the ISM using the previous SFR
self.m_tot_ISM_t[i_cm] = self.sfr_input[i_cm-1] * \
self.t_SF_t[i_cm] / self.sfe + self.m_crit_t[i_cm]
# If it's not the last timestep ...
else:
# Calculate the total mass of the ISM using the current SFR
self.m_tot_ISM_t[i_cm] = self.sfr_input[i_cm] * \
self.t_SF_t[i_cm] / self.sfe + self.m_crit_t[i_cm]
# If the IO model ...
elif self.in_out_control:
self.m_tot_ISM_t[0] = self.mgal
# Scale the initial gas reservoir that was already set
scale_m_tot = self.m_tot_ISM_t[0] / np.sum(self.ymgal[0])
for k_cm in range(len(self.ymgal[0])):
self.ymgal[0][k_cm] = self.ymgal[0][k_cm] * scale_m_tot
##############################################
# Copy M_tot_ISM Input #
##############################################
def __copy_m_tot_ISM_input(self):
'''
This function interpolates the gas masses from an input array
and adds the masses to the corresponding OMEGA steps
'''
# Variable to keep track of OMEGA's timestep
i_dt_csa = 0
t_csa = 0.0
nb_dt_csa = self.nb_timesteps + 1
# If just one entry ...
if len(self.m_tot_ISM_t_in) == 1:
self.m_tot_ISM_t[i_dt_csa] = self.m_tot_ISM_t_in[0][1]
i_dt_csa += 1
# If gas values need to be interpolated ...
else:
# For every timestep given in the array (starting at the second step)
for i_csa in range(1,len(self.m_tot_ISM_t_in)):
# Calculate the gas interpolation coefficient
a_gas = (self.m_tot_ISM_t_in[i_csa][1] - self.m_tot_ISM_t_in[i_csa-1][1]) / \
(self.m_tot_ISM_t_in[i_csa][0] - self.m_tot_ISM_t_in[i_csa-1][0])
b_gas = self.m_tot_ISM_t_in[i_csa][1] - a_gas * self.m_tot_ISM_t_in[i_csa][0]
# While we stay in the same time bin ...
while t_csa <= self.m_tot_ISM_t_in[i_csa][0]:
# Interpolate the gas mass
self.m_tot_ISM_t[i_dt_csa] = a_gas * t_csa + b_gas
# Exit the loop if the array is full
if i_dt_csa >= nb_dt_csa:
break
# Calculate the new time
t_csa += self.history.timesteps[i_dt_csa]
i_dt_csa += 1
# Exit the loop if the array is full
if i_dt_csa >= nb_dt_csa:
break
# If the array has been read completely, but the m_tot_ISM_t array is
# not full, fill the rest of the array with the last read value
while i_dt_csa < nb_dt_csa:
self.m_tot_ISM_t[i_dt_csa] = self.m_tot_ISM_t_in[-1][1]
i_dt_csa += 1
##############################################
# Calculate Outflow #
##############################################
def __calculate_outflow_t(self):
'''
This function calculates the mass-loading factor and the mass of outflow
at every timestep.
'''
# If the outflow rate is kept constant ...
if self.outflow_rate >= 0.0:
# Use the input value for each timestep
for i_ceo in range(0, self.nb_timesteps):
self.eta_outflow_t[i_ceo] = self.outflow_rate / \
self.sfr_input[i_ceo]
self.m_outflow_t[i_ceo] = self.outflow_rate * \
self.history.timesteps[i_ceo]
# If the outflow rate is connected to the SFR ...
else:
# If the mass of the dark matter halo is not evolving
if not self.DM_evolution:
#For each timestep ...
for i_ceo in range(0, self.nb_timesteps):
# Use the input mass-loading factor
self.eta_outflow_t[i_ceo] = self.mass_loading
self.m_outflow_t[i_ceo] = self.eta_outflow_t[i_ceo] * \
self.sfr_input[i_ceo] * self.history.timesteps[i_ceo]
# If the mass of the dark matter halo is evolving
else:
# Use the input mass-loading factor to normalize the evolution
# of this factor as a function of time
eta_norm = self.mass_loading * \
self.m_DM_0**(self.exp_ml*0.33333)* \
(1+self.redshift_f)**(0.5*self.exp_ml)
# For each timestep ...
for i_ceo in range(0, self.nb_timesteps):
# Calculate the mass-loading factor with redshift dependence
if self.z_dependent:
self.eta_outflow_t[i_ceo] = eta_norm * \
self.m_DM_t[i_ceo]**((-0.3333)*self.exp_ml) * \
(1+self.redshift_t[i_ceo])**(-(0.5)*self.exp_ml)
# Calculate the mass-loading factor without redshift dependence
else:
self.eta_outflow_t[i_ceo] = eta_norm * \
self.m_DM_t[i_ceo]**((-0.3333)*self.exp_ml)
# Calculate the outflow mass during the current timestep
self.m_outflow_t[i_ceo] = self.eta_outflow_t[i_ceo] * \
self.sfr_input[i_ceo] * self.history.timesteps[i_ceo]
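# Descriptive note: in the redshift-dependent case, combining with eta_norm
# above gives
# eta(t) = mass_loading * (m_DM_t/m_DM_0)**(-exp_ml/3) * ((1+z)/(1+redshift_f))**(-exp_ml/2),
# i.e. eta ~ v_vir**(-exp_ml) since v_vir ~ M_DM**(1/3) * (1+z)**(1/2);
# exp_ml = 1 and 2 are commonly associated with momentum- and energy-driven winds.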
# Create the interpolation coefficients
# eta = self.eta_outflow_t_coef[0] * t + self.eta_outflow_t_coef[1]
self.eta_outflow_t_coef = np.zeros((self.nb_timesteps,2))
for i_cmdt in range(self.nb_timesteps-1):
self.eta_outflow_t_coef[i_cmdt][0] = (self.eta_outflow_t[i_cmdt+1] - \
self.eta_outflow_t[i_cmdt]) / self.history.timesteps[i_cmdt]
self.eta_outflow_t_coef[i_cmdt][1] = self.eta_outflow_t[i_cmdt] - \
self.eta_outflow_t_coef[i_cmdt][0] * self.history.age[i_cmdt]
self.eta_outflow_t_coef[-1][0] = self.eta_outflow_t_coef[-2][0]
self.eta_outflow_t_coef[-1][1] = self.eta_outflow_t_coef[-2][1]
##############################################
# Add Ext. M_dot #
##############################################
def __add_ext_mdot(self):
'''
This function adds the stellar ejecta of external galaxies that
just merged in the mdot array of the current galaxy. This function
assumes that the times and the number of timesteps of the merging
galaxies may differ from those of the current galaxy.
Notes
=====
i_ext : Step index in the "external" merging mdot array
i_cur : Step index in the "current" galaxy mdot array
t_cur_prev : Lower time limit in the current i_cur bin
t_cur : Upper time limit in the current i_cur bin
M_dot_ini has an extra slot in the isotopes for the time,
which is t = 0.0 for i_ext = 0.
'''
# For every merging galaxy (every branch of a merger tree)
for i_merg in range(0,len(self.mdot_ini)):
# Initialisation of the local variables
i_ext = 0
i_cur = 0
t_cur_prev = 0.0
t_cur = self.history.timesteps[0]
t_ext_prev = 0.0
t_ext = self.mdot_ini_t[i_merg][i_ext+1]
# While the external ejecta has not been fully transferred...
len_mdot_ini_i_merg = len(self.mdot_ini[i_merg])
while i_ext < len_mdot_ini_i_merg and i_cur < self.nb_timesteps:
# While we need to change the external time bin ...
while t_ext <= t_cur:
# Calculate the overlap time between ext. and cur. bins
dt_trans = t_ext - max([t_ext_prev, t_cur_prev])
# Calculate the mass fraction that needs to be transferred
f_dt = dt_trans / (t_ext - t_ext_prev)
# Transfer all isotopes in the current mdot array
self.mdot[i_cur] += self.mdot_ini[i_merg][i_ext] * f_dt
# Move to the next external bin
i_ext += 1
if i_ext == (len_mdot_ini_i_merg):
break
t_ext_prev = t_ext
t_ext = self.mdot_ini_t[i_merg][i_ext+1]
# Quit the loop if all external bins have been considered
if i_ext == (len_mdot_ini_i_merg):
break
# While we need to change the current time bin ...
while t_cur < t_ext:
# Calculate the overlap time between ext. and cur. bins
dt_trans = t_cur - max([t_ext_prev, t_cur_prev])
# Calculate the mass fraction that needs to be transferred
f_dt = dt_trans / (t_ext - t_ext_prev)
# Transfer all isotopes in the current mdot array
self.mdot[i_cur] += self.mdot_ini[i_merg][i_ext] * f_dt
# Move to the next current bin
i_cur += 1
if i_cur == self.nb_timesteps:
break
t_cur_prev = t_cur
t_cur += self.history.timesteps[i_cur]
##############################################
# Run Simulation #
##############################################
def __run_simulation(self, mass_sampled=np.array([]), \
scale_cor=np.array([])):
'''
This function calculates the evolution of the chemical abundances of a
galaxy as a function of time.
Argument
========
mass_sampled : Stars sampled in the IMF by an external program.
scale_cor : Envelope correction for the IMF.
'''
# if self.len_decay_file > 0:
# print ('Warning, radioactive isotopes are missing in the outflows')
# For every timestep i considered in the simulation ...
for i in range(1, self.nb_timesteps+1):
# If the IMF must be sampled ...
if self.imf_rnd_sampling and self.m_pop_max >= \
(self.sfr_input[i-1] * self.history.timesteps[i-1]):
# Get the sampled masses
mass_sampled = self._get_mass_sampled(\
self.sfr_input[i-1] * self.history.timesteps[i-1])
# No mass sampled if using the full IMF ...
else:
mass_sampled = np.array([])
# Run a timestep using the input SFR
self.run_step(i, self.sfr_input[i-1], \
mass_sampled=mass_sampled, scale_cor=scale_cor)
# Calculate the last SFR at the end point of the simulation
if self.cl_SF_law and not self.open_box:
self.history.sfr_abs[-1] = self.sfe_gcs * np.sum(self.ymgal[i])
##############################################
# Run Step #
##############################################
def run_step(self, i, sfr_rs, m_added = np.array([]), m_lost = 0.0, \
no_in_out = False, f_esc_yields=0.0, mass_sampled=np.array([]),
scale_cor=np.array([])):
'''
This function calculates the evolution of one single step in the
chemical evolution.
Argument
========
i : Index of the timestep.
sfr_rs : Input star formation rate [Mo/yr] for the step i.
m_added : Mass (and composition) added for the step i.
m_lost : Mass lost for the step i.
no_in_out : Cancel the open box "if" statement if True
f_esc_yields: Fraction of non-contributing stellar ejecta
mass_sampled : Stars sampled in the IMF by an external program.
scale_cor : Envelope correction for the IMF.
'''
# Make sure that the number of timesteps is not exceeded
if not i == (self.nb_timesteps+1):
# For testing ..
if i == 1:
self.sfr_test = sfr_rs
# Calculate the current mass fraction of gas converted into stars,
# but only if the star formation rate is not followed
# within a self-consistent integration scheme.
if not self.use_external_integration:
self.__cal_m_frac_stars(i, sfr_rs)
else:
self.sfrin = sfr_rs # [Msun/yr]
self.m_locked = self.sfrin * self.history.timesteps[i-1]
# Run the timestep i (needs to be right after __cal_m_frac_stars!)
self._evol_stars(i, f_esc_yields, mass_sampled, scale_cor)
# Decay radioactive isotopes
if self.len_decay_file > 0 and not self.use_external_integration:
if self.use_decay_module:
self._decay_radio_with_module(i)
else:
self._decay_radio(i)
# Delay the outflow if needed (following SNe rather than SFR) ...
if self.out_follows_E_rate:
self.__delay_outflow(i)
# Add the incoming gas (if any)
if not self.use_external_integration:
len_m_added = len(m_added)
for k_op in range(0, len_m_added):
self.ymgal[i][k_op] += m_added[k_op]
# If no integration scheme is used to advance the system ..
if not self.use_external_integration:
# If gas needs to be removed ...
if m_lost > 0.0:
# Calculate the gas fraction lost
f_lost = m_lost / sum(self.ymgal[i])
if f_lost > 1.0:
f_lost = 1.0
if not self.print_off:
print ('!!Warning -- Remove more mass than available!!')
# Remove the mass for each isotope
f_lost_2 = (1.0 - f_lost)
self.ymgal[i] = f_lost_2 * self.ymgal[i]
# Radioactive isotopes lost
if self.len_decay_file > 0:
self.ymgal_radio[i] = f_lost_2 * self.ymgal_radio[i]
if not self.pre_calculate_SSPs:
self.ymgal_agb[i] = f_lost_2 * self.ymgal_agb[i]
self.ymgal_1a[i] = f_lost_2 * self.ymgal_1a[i]
self.ymgal_nsm[i] = f_lost_2 * self.ymgal_nsm[i]
self.ymgal_bhnsm[i] = f_lost_2 * self.ymgal_bhnsm[i]
self.ymgal_massive[i] = f_lost_2 * self.ymgal_massive[i]
for iiii in range(0,self.nb_delayed_extra):
self.ymgal_delayed_extra[iiii][i] = \
f_lost_2 * self.ymgal_delayed_extra[iiii][i]
# Radioactive isotopes lost
if self.len_decay_file > 0:
if self.radio_massive_agb_on:
self.ymgal_massive_radio[i] = f_lost_2 * self.ymgal_massive_radio[i]
self.ymgal_agb_radio[i] = f_lost_2 * self.ymgal_agb_radio[i]
if self.radio_sn1a_on:
self.ymgal_1a_radio[i] = f_lost_2 * self.ymgal_1a_radio[i]
if self.radio_nsmerger_on:
self.ymgal_nsm_radio[i] = f_lost_2 * self.ymgal_nsm_radio[i]
if self.radio_bhnsmerger_on:
self.ymgal_bhnsm_radio[i] = f_lost_2 * self.ymgal_bhnsm_radio[i]
for iiii in range(0,self.nb_delayed_extra_radio):
self.ymgal_delayed_extra_radio[iiii][i] = \
f_lost_2 * self.ymgal_delayed_extra_radio[iiii][i]
# If the open box scenario is used (and it is not skipped) ...
if self.open_box and (not no_in_out):
# Calculate the total mass of the gas reservoir at timestep i
# after the star formation and the stellar ejecta
m_tot_current = sum(self.ymgal[i])
# Add inflows
if self.len_m_inflow_X_array > 0.0:
self.ymgal[i] += self.m_inflow_X_array[i-1]
m_inflow_current = self.m_inflow_array[i-1]
self.m_inflow_t[i-1] = float(m_inflow_current)
else:
# Get the current mass of inflow
m_inflow_current = self.__get_m_inflow(i, m_tot_current)
# Add primordial gas coming with the inflow
if m_inflow_current > 0.0:
ym_inflow = self.prim_comp.get(quantity='Yields') * \
m_inflow_current
for k_op in range(0, self.nb_isotopes):
self.ymgal[i][k_op] += ym_inflow[k_op]
# Calculate the fraction of gas removed by the outflow
if not (m_tot_current + m_inflow_current) == 0.0:
if self.len_m_gas_array > 0:
self.m_outflow_t[i-1] = (m_tot_current + m_inflow_current) - self.m_gas_array[i]
frac_rem = self.m_outflow_t[i-1] / (m_tot_current + m_inflow_current)
if frac_rem < 0.0:
frac_rem = 0.0
# A negative outflow means the reservoir must grow to reach m_gas_array[i]:
# add the missing mass as primordial inflow, then cancel the outflow
ym_inflow = self.prim_comp.get(quantity='Yields') * \
(-1.0) * self.m_outflow_t[i-1]
for k_op in range(0, self.nb_isotopes):
self.ymgal[i][k_op] += ym_inflow[k_op]
self.m_outflow_t[i-1] = 0.0
else:
frac_rem = self.m_outflow_t[i-1] / \
(m_tot_current + m_inflow_current)
else:
frac_rem = 0.0
# Limit the outflow mass to the amount of available gas
if frac_rem > 1.0:
frac_rem = 1.0
self.m_outflow_t[i-1] = m_tot_current + m_inflow_current
if not self.print_off:
print ('Warning - '\
'Outflows eject more mass than available. ' \
'It has been reduced to the amount of available gas.')
# Remove mass from the ISM because of the outflow
self.ymgal[i] *= (1.0 - frac_rem)
if self.len_decay_file > 0:
self.ymgal_radio[i] *= (1.0 - frac_rem)
if not self.pre_calculate_SSPs:
self.ymgal_agb[i] *= (1.0 - frac_rem)
self.ymgal_1a[i] *= (1.0 - frac_rem)
self.ymgal_nsm[i] *= (1.0 - frac_rem)
self.ymgal_bhnsm[i] *= (1.0 - frac_rem)
self.ymgal_massive[i] *= (1.0 - frac_rem)
for iiii in range(0,self.nb_delayed_extra):
self.ymgal_delayed_extra[iiii][i] *= (1.0 - frac_rem)
# Radioactive isotopes lost
if self.len_decay_file > 0:
if self.radio_massive_agb_on:
self.ymgal_massive_radio[i] *= (1.0 - frac_rem)
self.ymgal_agb_radio[i] *= (1.0 - frac_rem)
if self.radio_sn1a_on:
self.ymgal_1a_radio[i] *= (1.0 - frac_rem)
if self.radio_nsmerger_on:
self.ymgal_nsm_radio[i] *= (1.0 - frac_rem)
if self.radio_bhnsmerger_on:
self.ymgal_bhnsm_radio[i] *= (1.0 - frac_rem)
for iiii in range(0,self.nb_delayed_extra_radio):
self.ymgal_delayed_extra_radio[iiii][i] *= (1.0 - frac_rem)
# Get the new metallicity of the gas and update history class
self.zmetal = self._getmetallicity(i)
self._update_history(i)
# If this is the last step ...
if i == self.nb_timesteps:
# Do the final update of the history class
self._update_history_final()
# Add the evolution arrays to the history class
self.history.m_tot_ISM_t = self.m_tot_ISM_t
self.history.eta_outflow_t = self.eta_outflow_t
# If external control ...
if self.external_control:
self.history.sfr_abs[i] = self.history.sfr_abs[i-1]
# Calculate the total mass of stars formed (for the cumulative fraction below)
self.m_stel_tot = 0.0
for i_tot in range(0,len(self.history.timesteps)):
self.m_stel_tot += self.history.sfr_abs[i_tot] * \
self.history.timesteps[i_tot]
if self.m_stel_tot > 0.0:
self.m_stel_tot = 1.0 / self.m_stel_tot
self.f_m_stel_tot = []
m_temp = 0.0
for i_tot in range(0,len(self.history.timesteps)):
m_temp += self.history.sfr_abs[i_tot] * \
self.history.timesteps[i_tot]
self.f_m_stel_tot.append(m_temp*self.m_stel_tot)
self.f_m_stel_tot.append(self.f_m_stel_tot[-1])
# Announce the end of the simulation
print (' OMEGA run completed -',self._gettime())
# Error message
else:
print ('The simulation is already over.')
##############################################
# Get Mass Sampled #
##############################################
def _get_mass_sampled(self, m_pop):
'''
This function randomly samples the IMF using a Monte Carlo
approach and returns an array with all sampled masses (in no
particular order).
Argument
========
m_pop : Mass of the considered stellar population
'''
# Initialization of the sampling arrays
mass_sampled_gms = []
m_tot_temp = 0.0
# Define the sampling precision in Msun
precision = 0.01 * m_pop * self.m_frac_massive_rdm
# Copy the lower and upper mass limit of the IMF
m_low_imf = self.transitionmass
m_up_imf = self.imf_bdys[1]
dm_temp = m_up_imf - m_low_imf
# While the total stellar mass is not formed ...
while abs(m_tot_temp - m_pop) > precision:
# Choose randomly a (m,nb) coordinate
rand_m = m_low_imf + np.random.random_sample()*dm_temp
rand_y = np.random.random_sample()
# If the coordinate is below the IMF curve
if rand_y <= (self.A_rdm * rand_m**(-2.3)):
# Add the stellar mass only if it doesn't
# form too much mass compared to m_pop
if (m_tot_temp + rand_m) - m_pop <= precision:
mass_sampled_gms.append(rand_m)
m_tot_temp += rand_m
# Stop if another massive star cannot fit within the remaining mass
if abs(m_tot_temp - m_pop) < self.transitionmass:
break
# Return the stellar masses sampled using Monte Carlo
return mass_sampled_gms
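# Hypothetical usage sketch (assuming an OMEGA instance "o" with IMF random
# sampling enabled; the variable names here are illustrative only):
# masses = o._get_mass_sampled(1.0e4) # sample ~1e4 Msun worth of massive stars
# print(len(masses), sum(masses))
# Only stars above self.transitionmass are sampled; lower-mass stars keep
# using the fully-populated IMF.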
##############################################
# Cal M Frac Stars #
##############################################
def __cal_m_frac_stars(self, i, sfr_rs):
'''
This function calculates the mass fraction of the gas reservoir that
is converted into stars at a given timestep.
Argument
========
i : Index of the timestep.
sfr_rs : Star formation rate [Mo/yr] for the timestep i
'''
# If the SFR is calculated from a star formation law (closed box)
if self.cl_SF_law and not self.open_box:
self.history.sfr_abs[i-1] = self.sfe_gcs * np.sum(self.ymgal[i-1])
self.sfrin = self.history.sfr_abs[i-1] * self.history.timesteps[i-1]
else:
# Calculate the total mass of stars formed during this timestep
self.sfrin = sfr_rs * self.history.timesteps[i-1]
self.history.sfr_abs[i-1] = sfr_rs
# Calculate the mass fraction of gas converted into stars
mgal_tot = 0.0
for k_ml in range(0, self.nb_isotopes):
mgal_tot += self.ymgal[i-1][k_ml]
if mgal_tot <= 0.0:
self.sfrin = 0.0
else:
self.sfrin = self.sfrin / mgal_tot
# Modify the history of SFR if there is not enough gas
if self.sfrin > 1.0:
self.history.sfr_abs[i-1] = mgal_tot / self.history.timesteps[i-1]
##############################################
# Delay Outflow #
##############################################
def __delay_outflow(self, i):
'''
This function converts the instantaneous outflow rate (which follows the
SFR) into a delayed rate where M_out follows the number of CC SNe.
Argument
========
i : Index of the timestep.
'''
# Calculate the 1 / (total number of CC SNe in the SSP)
if self.m_locked <= 0.0:
nb_cc_sne_inv = 1.0e+30
elif self.zmetal <= 0.0 and self.Z_trans > 0.0:
nb_cc_sne_inv = 1.0 / (self.nb_ccsne_per_m_pop3 * self.m_locked)
else:
nb_cc_sne_inv = 1.0 / (self.nb_ccsne_per_m * self.m_locked)
# Calculate the fraction of CC SNe in each future timestep
len_ssp_nb_cc_sne = len(self.ssp_nb_cc_sne)
f_nb_cc = np.zeros(len_ssp_nb_cc_sne, np.float64)
for i_nb_cc in range(0,len_ssp_nb_cc_sne):
f_nb_cc[i_nb_cc] = self.ssp_nb_cc_sne[i_nb_cc] * nb_cc_sne_inv
# Copy the original instantaneous mass outflow [Msun]
m_out_inst = self.m_outflow_t_vs_SFR[i-1]
# For each future timestep, including the current one ...
for i_do in range(0,len_ssp_nb_cc_sne):
# Add the delayed mass outflow
#print (i, i_do, i+i_do, len(self.m_outflow_t))
self.m_outflow_t[i-1+i_do] += m_out_inst * f_nb_cc[i_do]
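# Descriptive note: the instantaneous outflow of this step is redistributed
# over the current and future timesteps in proportion to the fraction of the
# SSP's core-collapse SNe exploding in each step; since the f_nb_cc fractions
# sum to ~1, the total outflowing mass should be approximately conserved.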
##############################################
# Get M Inflow #
##############################################
def __get_m_inflow(self, i, m_tot_current):
'''
This function calculates and returns the inflow mass at a given timestep
Argument
========
i : Index of the timestep.
m_tot_current : Total mass of the gas reservoir at step i
'''
# If an inflow mass in given at each timestep as an input ...
if self.len_m_inflow_array > 0:
m_inflow_current = self.m_inflow_array[i-1]
# If the inflow rate is kept constant ...
elif self.inflow_rate >= 0.0:
# Use the input rate to calculate the inflow mass
# Note : i-1 --> current timestep, see __copy_sfr_input()
m_inflow_current = self.inflow_rate * self.history.timesteps[i-1]
# If the inflow rate follows the outflow rate ...
elif self.in_out_control:
# Use the input scale factor to calculate the inflow mass
if self.out_follows_E_rate:
m_inflow_current = self.in_out_ratio * self.m_outflow_t_vs_SFR[i-1]
else:
m_inflow_current = self.in_out_ratio * self.m_outflow_t[i-1]
# If the inflow rate is calculated from the main equation ...
else:
# If SFR = 0 and we do not want to use the main equation ..
if self.sfr_input[i] == 0 and self.skip_zero:
m_inflow_current = 0.0
else:
# Calculate the mass of the inflow
m_inflow_current = self.m_tot_ISM_t[i] - \
m_tot_current + self.m_outflow_t[i-1]
# If the inflow mass is negative ...
if m_inflow_current < 0.0:
# Convert the negative inflow into positive outflow
if not self.skip_zero:
self.m_outflow_t[i-1] += (-1.0) * m_inflow_current
if not self.print_off:
print ('Warning - Negative inflow. ' \
'The outflow rate has been increased.', i)
# Assume no inflow
m_inflow_current = 0.0
# Keep the mass of inflow in memory
self.m_inflow_t[i-1] = float(m_inflow_current)
return m_inflow_current
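# Descriptive note: in the "main equation" branch above, the inflow closes the
# gas budget, M_inflow(i) = m_tot_ISM_t[i] - m_tot_current + m_outflow_t[i-1],
# so the gas reservoir is forced to follow the pre-computed m_tot_ISM_t track;
# a negative solution is converted into extra outflow instead.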
###############################################################################################
######################## Here start the analysis methods ######################################
###############################################################################################
#### trueman edits
def mass_frac_plot(self,fig=0,species=['all'],sources=['agb','massive','1a'],\
cycle=-1, solar_ref='Asplund_et_al_2009',yscale='log'):
'''
Fractional contribution from each stellar source towards the galactic total, relative to solar.
Parameters
----------
species : array of strings
isotope or element name,
e.g. ['H-1','He-4','Fe','Fe-56']
default = ['all']
sources : array of strings
specifies the stellar sources to plot,
e.g. ['agb','massive','1a']
cycle : float
specifies cycle number to plot,
e.g. 'cycle=-1' will plot last cycle
solar_ref : string
the solar abundances used as a reference
default is Asplund et al. 2009
'Asplund_et_al_2009'
'Anders_Grevesse_1989'
'Grevesse_Noels_1993'
'Grevesse_Sauval_1998'
'Lodders_et_al_2009'
yscale: string
choose y axis scale
'log' or 'linear'
Examples
---------
>>> s.mass_frac_plot(species=['all'], sources=['agb','massive','1a'],
cycle=-1, solar_ref='Lodders_et_al_2009', yscale='log')
'''
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
f = open(os.path.join(nupy_path, 'stellab_data',\
'solar_normalization', str(solar_ref) + '.txt'), 'r')
g = open(os.path.join(nupy_path, 'stellab_data',\
'solar_normalization', 'element_mass.txt'), 'r')
h = open(os.path.join(nupy_path, 'stellab_data',\
'solar_normalization', 'Asplund_et_al_2009_iso.txt'), 'r')
lines=f.readlines()
lines_g=g.readlines()
lines_h=h.readlines()
ele_mass = []
ele_nam = []
abu_sol = []
ele_sol = []
iso_nam =[]
iso_frac =[]
# items taken from Asplund
# keys = element symbol, values = logarithmic solar abundance
for i in lines:
ele_sol.append(i.split()[1])
abu_sol.append(float(i.split()[2]))
f.close()
sol_dict = dict(zip(ele_sol, abu_sol))
# items taken from online data table
# keys = element symbol, values = element mass number
for j in lines_g:
ele_mass.append(float(j.split()[0]))
ele_nam.append(j.split()[2])
g.close()
ele_dict = dict(zip(ele_nam, ele_mass))
# items taken from Asplund
# keys = isotope symbol, values = relative number fraction of isotope
for k in lines_h:
iso_nam.append(k.split()[0])
iso_frac.append(float(k.split()[1])/100)
h.close()
iso_frac_dict = dict(zip(iso_nam, iso_frac))
# Create a dictionary with keys = element symbol
# and vals = solar mass fraction
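        # Conversion used below: the solar tables give logarithmic number abundances
        # A(X) = log10(N_X/N_H) + 12, so 10**(abu - 12) is the number ratio relative
        # to hydrogen; multiplying by the atomic mass and by the solar hydrogen mass
        # fraction (0.7381, Asplund et al. 2009) gives an unnormalised mass fraction,
        # which is normalised to unity just below.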
ele_mass_frac = {}
for ele,mass in ele_dict.items():
for el,abu in sol_dict.items():
if ele == el:
ele_mass_frac.update([(ele,10**(abu-12)*mass*0.7381)])
# Normalise the above dictionary so that mass fractions
# sum to unity
tot_mass_frac = sum(ele_mass_frac.values())
for ele,frac in ele_mass_frac.items():
sol_dict.update([(ele,frac/tot_mass_frac)])
# Create a dictionary with keys = isotope
# vals = (mass fraction)/(isotope mass)
new = {}
for ele,mass in ele_dict.items():
for iso,frac in iso_frac_dict.items():
if ele == iso.split('-',1)[0]:
new.update([(iso,frac/mass)])
# Create a dictionary with keys = isotope
# vals = contribution towards total element mass fraction from each isotope
weighted_iso_frac={}
for ele,frac in sol_dict.items():
for iso,fracs in new.items():
if ele == iso.split('-',1)[0]:
weighted_iso_frac.update([
(iso,frac*fracs*float(iso.split('-',1)[-1]))])
species_mass_frac_sol_dict = weighted_iso_frac
species_mass_frac_sol_dict.update(sol_dict)
# Remove species which have no solar mass data
remove_keys = []
for key,val in species_mass_frac_sol_dict.items():
if val < 10e-30:
remove_keys.append(key)
for i in remove_keys:
if i in species_mass_frac_sol_dict:
del species_mass_frac_sol_dict[i]
iso_mass_gal = dict(zip(self.history.isotopes, self.ymgal[cycle]))
ele_dum=[]
for iso,mass in iso_mass_gal.items(): # create a list of the elements
ele = (iso.split('-',1)[0]) # from list of isotopes
ele_dum.append(ele)
        elements = np.unique(ele_dum)
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 20 12:07:57 2019
@author: johnmount
"""
import numpy
import pandas
import vtreat.util
import vtreat.transform
class VarTransform:
"""build a treatment plan for a numeric outcome (regression)"""
def __init__(self, incoming_column_name, derived_column_names, treatment):
self.incoming_column_name_ = incoming_column_name
self.derived_column_names_ = derived_column_names.copy()
self.treatment_ = treatment
self.need_cross_treatment_ = False
self.refitter_ = None
def transform(self, data_frame):
raise NotImplementedError("base method called")
class MappedCodeTransform(VarTransform):
def __init__(self, incoming_column_name, derived_column_name, treatment, code_book):
VarTransform.__init__(
self, incoming_column_name, [derived_column_name], treatment
)
self.code_book_ = code_book
def transform(self, data_frame):
incoming_column_name = self.incoming_column_name_
derived_column_name = self.derived_column_names_[0]
sf = pandas.DataFrame({incoming_column_name: data_frame[incoming_column_name]})
bad_posns = vtreat.util.is_bad(sf[incoming_column_name])
sf.loc[bad_posns, incoming_column_name] = "_NA_"
res = pandas.merge(
sf, self.code_book_, on=[self.incoming_column_name_], how="left", sort=False
) # ordered by left table rows
res = res[[derived_column_name]].copy()
res.loc[vtreat.util.is_bad(res[derived_column_name]), derived_column_name] = 0
return res
class YAwareMappedCodeTransform(MappedCodeTransform):
def __init__(
self,
incoming_column_name,
derived_column_name,
treatment,
code_book,
refitter,
extra_args,
params,
):
MappedCodeTransform.__init__(
self,
incoming_column_name=incoming_column_name,
derived_column_name=derived_column_name,
treatment=treatment,
code_book=code_book,
)
self.need_cross_treatment_ = True
self.refitter_ = refitter
self.extra_args_ = extra_args
self.params_ = params
class CleanNumericTransform(VarTransform):
def __init__(self, incoming_column_name, replacement_value):
VarTransform.__init__(
self, incoming_column_name, [incoming_column_name], "clean_copy"
)
self.replacement_value_ = replacement_value
def transform(self, data_frame):
col = numpy.asarray(data_frame[self.incoming_column_name_].copy()).astype(float)
bad_posns = vtreat.util.is_bad(col)
col[bad_posns] = self.replacement_value_
res = pandas.DataFrame({self.derived_column_names_[0]: col})
return res
class IndicateMissingTransform(VarTransform):
def __init__(self, incoming_column_name, derived_column_name):
VarTransform.__init__(
self, incoming_column_name, [derived_column_name], "missing_indicator"
)
def transform(self, data_frame):
col = vtreat.util.is_bad(data_frame[self.incoming_column_name_])
res = pandas.DataFrame({self.derived_column_names_[0]: col})
return res.astype(float)
def fit_regression_impact_code(*, incoming_column_name, x, y, extra_args, params):
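    # Impact coding: each categorical level is mapped to the difference between that
    # level's (optionally hierarchically shrunk) conditional mean of y and the grand
    # mean, producing a single numeric "impact" column for the variable.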
sf = vtreat.util.grouped_by_x_statistics(x, y)
if sf.shape[0] <= 1:
return None
if params["use_hierarchical_estimate"]:
sf["_impact_code"] = sf["_hest"] - sf["_gm"]
else:
sf["_impact_code"] = sf["_group_mean"] - sf["_gm"]
sf = sf.loc[:, ["x", "_impact_code"]].copy()
newcol = incoming_column_name + "_impact_code"
sf.columns = [incoming_column_name, newcol]
return YAwareMappedCodeTransform(
incoming_column_name=incoming_column_name,
derived_column_name=newcol,
treatment="impact_code",
code_book=sf,
refitter=fit_regression_impact_code,
extra_args=extra_args,
params=params,
)
def fit_regression_deviation_code(*, incoming_column_name, x, y, extra_args, params):
sf = vtreat.util.grouped_by_x_statistics(x, y)
if sf.shape[0] <= 1:
return None
sf["_deviation_code"] = numpy.sqrt(sf["_var"])
sf = sf.loc[:, ["x", "_deviation_code"]].copy()
newcol = incoming_column_name + "_deviation_code"
sf.columns = [incoming_column_name, newcol]
return YAwareMappedCodeTransform(
incoming_column_name=incoming_column_name,
derived_column_name=newcol,
treatment="deviation_code",
code_book=sf,
refitter=fit_regression_deviation_code,
extra_args=extra_args,
params=params,
)
def fit_binomial_impact_code(*, incoming_column_name, x, y, extra_args, params):
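    # Binomial variant of impact coding: y is first reduced to a 0/1 indicator of the
    # target outcome, and the impact is expressed on the log scale as the ratio of the
    # level's (smoothed) success rate to the grand rate, stabilised by eps.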
outcome_target = (extra_args["outcome_target"],)
var_suffix = extra_args["var_suffix"]
y = numpy.asarray(numpy.asarray(y) == outcome_target, dtype=numpy.float64)
sf = vtreat.util.grouped_by_x_statistics(x, y)
if sf.shape[0] <= 1:
return None
eps = 1.0e-3
if params["use_hierarchical_estimate"]:
sf["_logit_code"] = | numpy.log((sf["_hest"] + eps) / (sf["_gm"] + eps)) | numpy.log |
import collections
import numpy as np
from guesswhat.statistics.abstract_plotter import *
import seaborn as sns
import pandas as pd
class SuccessNoObject(AbstractPlotter):
def __init__(self, path, games, logger, suffix):
super(SuccessNoObject, self).__init__(path, self.__class__.__name__, suffix)
status_count = collections.Counter()
status_list = []
objects = []
for game in games:
status_count[game.status] += 1
status_list.append(game.status)
objects.append(len(game.objects))
sns.set(style="whitegrid", color_codes=True)
success = np.array([s == "success" for s in status_list]) + 0
failure = np.array([s == "failure" for s in status_list]) + 0
incomp = np.array([s == "incomplete" for s in status_list]) + 0
if sum(incomp) > 0:
columns = ['No objects', 'Success', 'Failure', 'Incomplete']
data = np.array([objects, success, failure, incomp]).transpose()
else:
columns = ['No objects', 'Success', 'Failure']
data = np.array([objects, success, failure]).transpose()
df = pd.DataFrame(data, columns=columns)
df = df.convert_objects(convert_numeric=True)
df = df.groupby('No objects').sum()
f = df.plot(kind="bar", stacked=True, width=1, alpha=0.3, color=["g", "r", "b"])
sns.regplot(x=np.array([0]), y=np.array([0]), scatter=False, line_kws={'linestyle':'--'}, label="% Success",ci=None, color="b")
#f.set_xlim(0.5,18.5)
#f.set_ylim(0,25000)
f.set_xlabel("Number of objects", {'size':'14'})
f.set_ylabel("Number of dialogues", {'size':'14'})
f.legend(loc="best", fontsize='large')
###########################
success = np.array([s == "success" for s in status_list])
failure = np.array([s == "failure" for s in status_list])
incomp = np.array([s == "incomplete" for s in status_list])
objects = np.array(objects)
rng = range(3, 22)
histo_success = np.histogram(objects[success], bins=rng)
histo_failure = np.histogram(objects[failure], bins=rng)
histo_incomp = np.histogram(objects[incomp], bins=rng)
normalizer = histo_success[0] + histo_failure[0] + histo_incomp[0]
histo_success = 1.0*histo_success[0] / normalizer
histo_failure = 1.0*histo_failure[0] / normalizer
histo_incomp = 1.0*histo_incomp[0] / normalizer
ax2 = f.twinx()
curve = np.ones(len(normalizer))-histo_failure-histo_incomp
        f = sns.regplot(x=np.linspace(1, 20, 18))
import os
from sys import argv, stdout
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import tensorflow as tf
import numpy as np
import scipy
import scipy.io
from itertools import product as prod
import time
from tensorflow.python.client import timeline
import cProfile
from sys import argv, stdout
from get_data import *
import pathlib
from noise_models_and_integration import *
from architecture import *
# from experiments import noise_1_paramas as noise_params
def variation_acc2_local_disturb(sess,
network,
x_,
keep_prob,
saver,
test_input,
test_target,
params):
eps = 10 ** (-params.eps_order)
# restoring saved model
saver.restore(sess, "weights/dim_{}/{}/gam_{}_alfa_{}.ckpt".format(params.model_dim, params.noise_name, params.gamma, params.alpha))
    # initializing the resulting tensor; the first two dimensions correspond to the coordinate being disturbed, and the variation of the outputs is accumulated along the last dimension
results = np.zeros((n_ts, controls_nb, len(np.array(test_input))))
print(len(test_input))
print(np.shape(results))
iter = -1
for sample_nb in range(len(np.array(test_input))):
# taking sample NCP
origin_NCP = test_input[sample_nb]
# taking target superoperator corresponding to the NCP
origin_superoperator = test_target[sample_nb]
tf_result = False
# calculating nnDCP corresponding to input NCP
pred_DCP = get_prediction(sess, network, x_, keep_prob, np.reshape(origin_NCP, [1, params.n_ts, params.controls_nb]))
# calculating superoperator from nnDCP
sup_from_pred_DCP = integrate_lind(pred_DCP[0], tf_result, params)
print("sanity check")
acceptable_error = fidelity_err([origin_superoperator, sup_from_pred_DCP], params.dim, tf_result)
print("predicted DCP", acceptable_error)
print("---------------------------------")
############################################################################################################
        # if the sanity-check error is within the acceptable threshold, the experiment is performed
if acceptable_error <= params.accept_err:
iter += 1
# iteration over all coordinates
for (t, c) in prod(range(params.n_ts), range(params.controls_nb)):
new_NCP = origin_NCP
if new_NCP[t, c] < (1 - eps):
new_NCP[t, c] += eps
else:
new_NCP[t, c] -= eps
sup_from_new_NCP = integrate_lind(new_NCP, tf_result, params)
                new_DCP = get_prediction(sess, network, x_, keep_prob,
                                         np.reshape(new_NCP, [1, n_ts, controls_nb]))
import os
import pandas as pd
import numpy as np
from bokeh.io import output_file, show
from bokeh.plotting import figure
from fivepseq.logic.structures.fivepseq_counts import CountManager, FivePSeqCounts
from viz.bokeh_plots import bokeh_scatter_plot, bokeh_transcript_scatter_plot
dir_5pseq_human = "/proj/sllstore2017018/lilit/5pseq_human"
transcript_assembly = pd.read_csv(
os.path.join(dir_5pseq_human, "fivepseq_Hela-rep1", "transcript_assembly.txt"),
sep="\t")
transcript_count_full_Hela_rep1_dict = {
"Hela-rep1": CountManager.read_counts_as_list(
os.path.join(dir_5pseq_human, "fivepseq_Hela-rep1", "counts_FULL_LENGTH.txt")),
"HelaCHX-rep1": CountManager.read_counts_as_list(
os.path.join(dir_5pseq_human, "fivepseq_HelaCHX-rep1", "counts_FULL_LENGTH.txt")),
"HelaFrag-rep1": CountManager.read_counts_as_list(
os.path.join(dir_5pseq_human, "fivepseq_HelaFrag-rep1", "counts_FULL_LENGTH.txt"))
}
HelaCHX_diff = [[]] * len(transcript_count_full_Hela_rep1_dict.get("HelaCHX-rep1"))
HelaUnt_diff = [[]] * len(transcript_count_full_Hela_rep1_dict.get("Hela-rep1"))
count_vector_list_frag = transcript_count_full_Hela_rep1_dict.get("HelaFrag-rep1")
count_vector_list_unt = transcript_count_full_Hela_rep1_dict.get("Hela-rep1")
count_vector_list_chx = transcript_count_full_Hela_rep1_dict.get("HelaCHX-rep1")
for i in range(0, len(transcript_assembly)):
count_vector_unt = count_vector_list_unt[i]
count_vector_chx = count_vector_list_chx[i]
count_vector_frag = count_vector_list_frag[i]
diff_vector_unt = np.array(count_vector_unt) - np.array(count_vector_frag)
diff_vector_unt[diff_vector_unt < 0] = 0
HelaUnt_diff[i] = list(diff_vector_unt)
    diff_vector_chx = np.array(count_vector_chx)
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from .base_representation import BaseRepresentation
from ..data_readers import BaseReader
class RegressionRepresentation(BaseRepresentation):
def __init__(self, identifier='', value=None):
super().__init__(identifier)
self.value = value
class RegressionAnnotation(RegressionRepresentation):
pass
class RegressionPrediction(RegressionRepresentation):
def to_annotation(self, **kwargs):
return RegressionAnnotation(self.identifier, self.value)
class GazeVectorRepresentation(RegressionRepresentation):
def __init__(self, identifier='', value=None):
if value is None:
value = np.array([])
super().__init__(identifier, value)
class GazeVectorAnnotation(GazeVectorRepresentation):
pass
class GazeVectorPrediction(GazeVectorRepresentation):
def to_annotation(self, **kwargs):
return GazeVectorAnnotation(self.identifier, self.value)
class FacialLandmarksRepresentation(BaseRepresentation):
def __init__(self, identifier='', x_values=None, y_values=None):
super().__init__(identifier)
self.x_values = x_values if x_values is not None else []
self.y_values = y_values if y_values is not None else []
class FacialLandmarksAnnotation(FacialLandmarksRepresentation):
@property
def interocular_distance(self):
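        # Mean position of the left- and right-eye landmark groups; their separation is
        # the usual normaliser for landmark-error metrics such as the normalised mean error.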
left_eye = [
np.mean(self.x_values[self.metadata['left_eye']]),
np.mean(self.y_values[self.metadata['left_eye']])
]
right_eye = [
np.mean(self.x_values[self.metadata['right_eye']]),
np.mean(self.y_values[self.metadata['right_eye']])
]
return np.linalg.norm((np.subtract(left_eye, right_eye)))
class FacialLandmarksPrediction(FacialLandmarksRepresentation):
pass
class FacialLandmarks3DRepresentation(BaseRepresentation):
def __init__(self, identifier='', x_values=None, y_values=None, z_values=None):
super().__init__(identifier)
self.x_values = x_values if x_values is not None else []
self.y_values = y_values if y_values is not None else []
self.z_values = z_values if z_values is not None else []
class FacialLandmarks3DAnnotation(FacialLandmarks3DRepresentation, FacialLandmarksAnnotation):
def __init__(self, identifier='', x_values=None, y_values=None, z_values=None, face_mask=None):
super().__init__(identifier, x_values, y_values, z_values)
self.face_mask = face_mask
def normalization_coef(self, is_2d=False):
if self.face_mask is None:
min_x, max_x = np.min(self.x_values), np.max(self.x_values)
min_y, max_y = np.min(self.y_values), np.max(self.y_values)
min_z, max_z = np.min(self.z_values), np.max(self.z_values)
else:
face_vertices_x = self.x_values[self.face_mask > 0]
face_vertices_y = self.y_values[self.face_mask > 0]
            face_vertices_z = self.z_values[self.face_mask > 0]
            min_x, max_x = np.min(face_vertices_x), np.max(face_vertices_x)
import random
import numpy as np
import torch
from PIL import Image, ImageOps, ImageFilter
from collections import namedtuple
Pair = namedtuple('Pair', ['image', 'mask'])
def HorizontalFlip(pair, probability):
'''
Horizontal Flip image and mask.
'''
if random.random() > probability:
return pair
    image = pair.image.transpose(Image.FLIP_LEFT_RIGHT)
    mask = pair.mask.transpose(Image.FLIP_LEFT_RIGHT)
return Pair(image, mask)
def VerticalFlip(pair, probability):
'''
Vertical Flip image and mask.
'''
if random.random() > probability:
return pair
    image = pair.image.transpose(Image.FLIP_TOP_BOTTOM)
    mask = pair.mask.transpose(Image.FLIP_TOP_BOTTOM)
return Pair(image, mask)
def Scale(pair, size):
'''
Scale image and mask.
parameters:
size: (2-tuple), (width, height)
'''
image = pair.image.resize(size, Image.BILINEAR)
mask = pair.mask.resize(size, Image.BILINEAR)
return Pair(image, mask)
def Translation(pair, factor, probability):
'''
Translate image and mask by factor.
parameters:
factor: (float), must be in [0, 1].
return:
The original image's (width, height) * factor from the top-left corner.
'''
if random.random() > probability:
return pair
image_shape = pair.image.size
corner_x = int(image_shape[0] * factor)
corner_y = int(image_shape[1] * factor)
    temp = np.asarray(pair.image)
import pandas as pd
import numpy as np
import tensorflow as tf
import os, pickle
class Reader(object):
def read(self, data_path):
self.read_data()
self.merge_id()
self.add_reverse()
if self.args.reindex:
self.reindex_kb()
self.gen_t_label()
self._ent_num = self._entity_num
self._rel_num = self._relation_num
self._ent_mapping = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self._rel_mapping = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self._ent_testing = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self._rel_testing = pd.DataFrame({'kb_1':{}, 'kb_2':{}})
self.gen_filter_mat()
self._kb = self._train_data
return
def read_data(self):
pass
def merge_id(self):
self._train_data['h_id'] = self._e_id[self._train_data.h].values
self._train_data['r_id'] = self._r_id[self._train_data.r].values
self._train_data['t_id'] = self._e_id[self._train_data.t].values
self._test_data['h_id'] = self._e_id[self._test_data.h].values
self._test_data['r_id'] = self._r_id[self._test_data.r].values
self._test_data['t_id'] = self._e_id[self._test_data.t].values
self._valid_data['h_id'] = self._e_id[self._valid_data.h].values
self._valid_data['r_id'] = self._r_id[self._valid_data.r].values
self._valid_data['t_id'] = self._e_id[self._valid_data.t].values
def gen_t_label(self):
full = pd.concat([self._train_data, self._test_data, self._valid_data], ignore_index=True)
f_t_labels = full['t_id'].groupby([full['h_id'], full['r_id']]).apply(lambda x: pd.unique(x.values))
f_t_labels.name = 't_label'
self._test_data = self._test_data.join(f_t_labels, on=['h_id', 'r_id'])
self._valid_data = self._valid_data.join(f_t_labels, on=['h_id', 'r_id'])
def add_reverse(self):
def add_reverse_for_data(data):
reversed_data = data.rename(columns={'h_id': 't_id', 't_id': 'h_id'})
reversed_data.r_id += self._relation_num
data = pd.concat(([data, reversed_data]), ignore_index=True, sort=False)
return data
self._train_data = add_reverse_for_data(self._train_data)
self._test_data = add_reverse_for_data(self._test_data)
self._valid_data = add_reverse_for_data(self._valid_data)
self._relation_num_for_eval = self._relation_num
self._relation_num *= 2
def reindex_kb(self):
train_data = self._train_data
test_data = self._test_data
valid_data = self._valid_data
eids = pd.concat([train_data.h_id, train_data.t_id, self._e_id], ignore_index=True)
tv_eids = np.unique(pd.concat([test_data.h_id, test_data.t_id, valid_data.t_id, valid_data.h_id]))
not_train_eids = tv_eids[~np.in1d(tv_eids, eids)]
rids = pd.concat([train_data.r_id, pd.Series(np.arange(self._relation_num))],ignore_index=True)
def gen_map(eids, rids):
e_num = eids.groupby(eids.values).size().sort_values()[::-1]
not_train = pd.Series(np.zeros_like(not_train_eids), index=not_train_eids)
e_num = pd.concat([e_num, not_train])
r_num = rids.groupby(rids.values).size().sort_values()[::-1]
e_map = pd.Series(range(e_num.shape[0]), index=e_num.index)
r_map = pd.Series(range(r_num.shape[0]), index=r_num.index)
return e_map, r_map
def remap_kb(kb, e_map, r_map):
kb.loc[:, 'h_id'] = e_map.loc[kb.h_id.values].values
kb.loc[:, 'r_id'] = r_map.loc[kb.r_id.values].values
kb.loc[:, 't_id'] = e_map.loc[kb.t_id.values].values
return kb
def remap_id(s, rm):
s = rm.loc[s.values].values
return s
e_map, r_map = gen_map(eids, rids)
self._e_map, self._r_map = e_map, r_map
self._train_data = remap_kb(train_data, e_map, r_map)
self._valid_data = remap_kb(self._valid_data, e_map, r_map)
self._test_data = remap_kb(self._test_data, e_map, r_map)
self._e_id = remap_id(self._e_id, e_map)
self._r_id = remap_id(self._r_id, r_map)
return not_train_eids
def in2d(self, arr1, arr2):
"""Generalisation of numpy.in1d to 2D arrays"""
assert arr1.dtype == arr2.dtype
arr1_view = np.ascontiguousarray(arr1).view(np.dtype((np.void,
arr1.dtype.itemsize * arr1.shape[1])))
arr2_view = np.ascontiguousarray(arr2).view(np.dtype((np.void,
arr2.dtype.itemsize * arr2.shape[1])))
intersected = np.in1d(arr1_view, arr2_view)
return intersected.view(np.bool).reshape(-1)
def gen_filter_mat(self):
def sp_gen_filter_mat(t_label):
rows, cols = [], []
for row, tails in enumerate(t_label):
rows += list(np.repeat(row, repeats=len(tails)))
cols += list(tails)
return (rows, cols)
self._tail_valid_filter_mat = sp_gen_filter_mat(self._valid_data.t_label)
self._tail_test_filter_mat = sp_gen_filter_mat(self._test_data.t_label)
def gen_label_mat_for_train(self):
def gen_train_relation_label_vac(r):
c = pd.value_counts(r)
values = 1. * c.values / c.sum()
return np.stack([c.index, values], axis=1)
def gen_train_entity_label_vac(r):
            indices = np.stack([r.label_id.values, r.values], axis=1)
"""
Name: diffusion_functions
Purpose: Contains functions to calculate diffusion of distributed wind model
(1) Determine maximum market size as a function of payback time;
(2) Parameterize Bass diffusion curve with diffusion rates (p, q) set by
payback time;
(3) Determine current stage (equivaluent time) of diffusion based on
existing market and current economics; and
(3) Calculate new market share by stepping forward on diffusion curve.
"""
import numpy as np
import pandas as pd
import config
import utility_functions as utilfunc
import decorators
#==============================================================================
# Load logger
logger = utilfunc.get_logger()
#==============================================================================
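# A minimal illustrative sketch of the Bass diffusion curve described in the module
# docstring: `p` is the coefficient of innovation, `q` the coefficient of imitation,
# and `teq` the equivalent time on the curve. The helper name is editorial; the
# model's own stepping logic lives in calc_diffusion_market_share (defined elsewhere)
# and may differ in detail.
def _bass_cumulative_adoption_sketch(p, q, teq):
    """Cumulative adoption fraction F(t) of a standard Bass curve at equivalent time teq."""
    return (1.0 - np.exp(-1.0 * (p + q) * teq)) / (1.0 + (q / p) * np.exp(-1.0 * (p + q) * teq))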
#=============================================================================
# ^^^^ Diffusion Calculator ^^^^
@decorators.fn_timer(logger = logger, tab_level = 2, prefix = '')
def calc_diffusion_solar(df, is_first_year, bass_params, year,
override_p_value = None, override_q_value = None, override_teq_yr1_value = None):
"""
Calculates the market share (ms) added in the solve year. Market share must be less
than max market share (mms) except initial ms is greater than the calculated mms.
For this circumstance, no diffusion allowed until mms > ms. Also, do not allow ms to
decrease if economics deterioriate. Using the calculated
market share, relevant quantities are updated.
IN: df - pd dataframe - Main dataframe
OUT: df - pd dataframe - Main dataframe
market_last_year - pd dataframe - market to inform diffusion in next year
"""
df = df.reset_index()
bass_params = bass_params[bass_params['tech']=='solar']
# set p/q/teq_yr1 params
df = pd.merge(df, bass_params[['state_abbr', 'bass_param_p', 'bass_param_q', 'teq_yr1', 'sector_abbr']], how = 'left', on = ['state_abbr','sector_abbr'])
# calc diffusion market share
df = calc_diffusion_market_share(df, is_first_year)
# market share floor is based on last year's market share
df['market_share'] = np.maximum(df['diffusion_market_share'], df['market_share_last_year'])
# calculate the "new" market share (old - current)
df['new_market_share'] = df['market_share'] - df['market_share_last_year']
# cap the new_market_share where the market share exceeds the max market share
df['new_market_share'] = np.where(df['market_share'] > df['max_market_share'], 0, df['new_market_share'])
# calculate new adopters, capacity and market value
df['new_adopters'] = df['new_market_share'] * df['developable_agent_weight']
df['new_market_value'] = df['new_adopters'] * df['system_kw'] * df['system_capex_per_kw']
df['new_system_kw'] = df['new_adopters'] * df['system_kw']
df['new_batt_kw'] = df['new_adopters'] * df['batt_kw']
df['new_batt_kwh'] = df['new_adopters'] * df['batt_kwh']
# then add these values to values from last year to get cumulative values:
df['number_of_adopters'] = df['adopters_cum_last_year'] + df['new_adopters']
df['market_value'] = df['market_value_last_year'] + df['new_market_value']
df['system_kw_cum'] = df['system_kw_cum_last_year'] + df['new_system_kw']
df['batt_kw_cum'] = df['batt_kw_cum_last_year'] + df['new_batt_kw']
df['batt_kwh_cum'] = df['batt_kwh_cum_last_year'] + df['new_batt_kwh']
# constrain state-level capacity totals to known historical values
if year in (2014, 2016, 2018):
group_cols = ['state_abbr', 'sector_abbr', 'year']
state_capacity_total = (df[group_cols+['system_kw_cum', 'batt_kw_cum', 'batt_kwh_cum', 'agent_id']].groupby(group_cols)
.agg({'system_kw_cum':'sum', 'batt_kw_cum':'sum', 'batt_kwh_cum':'sum', 'agent_id':'count'})
.rename(columns={'system_kw_cum':'state_solar_kw_cum', 'batt_kw_cum':'state_batt_kw_cum', 'batt_kwh_cum':'state_batt_kwh_cum', 'agent_id':'agent_count'})
.reset_index())
# coerce dtypes
state_capacity_total.state_solar_kw_cum = state_capacity_total.state_solar_kw_cum.astype(np.float64)
state_capacity_total.state_batt_kw_cum = state_capacity_total.state_batt_kw_cum.astype(np.float64)
state_capacity_total.state_batt_kwh_cum = state_capacity_total.state_batt_kwh_cum.astype(np.float64)
df.system_kw_cum = df.system_kw_cum.astype(np.float64)
df.batt_kw_cum = df.batt_kw_cum.astype(np.float64)
df.batt_kwh_cum = df.batt_kwh_cum.astype(np.float64)
# merge state totals back to agent df
df = pd.merge(df, state_capacity_total, how = 'left', on = ['state_abbr', 'sector_abbr', 'year'])
# read csv of historical capacity values by state and sector
historical_state_df = pd.read_csv(config.OBSERVED_DEPLOYMENT_BY_STATE)
# join historical data to agent df
df = pd.merge(df, historical_state_df, how='left', on=['state_abbr', 'sector_abbr', 'year'])
# calculate scale factor - weight that is given to each agent based on proportion of state total
# where state cumulative capacity is 0, proportion evenly to all agents
df['solar_scale_factor'] = np.where(df['state_solar_kw_cum'] == 0, 1.0/df['agent_count'], df['system_kw_cum'] / df['state_solar_kw_cum'])
df['batt_mw_scale_factor'] = np.where(df['state_batt_kw_cum'] == 0, 1.0/df['agent_count'], df['batt_kw_cum'] / df['state_batt_kw_cum'])
df['batt_mwh_scale_factor'] = np.where(df['state_batt_kwh_cum'] == 0, 1.0/df['agent_count'], df['batt_kwh_cum'] / df['state_batt_kwh_cum'])
# use scale factor to constrain agent capacity values to historical values
df['system_kw_cum'] = df['solar_scale_factor'] * df['observed_solar_mw'] * 1000.
df['batt_kw_cum'] = df['batt_mw_scale_factor'] * df['observed_storage_mw'] * 1000.
df['batt_kwh_cum'] = df['batt_mwh_scale_factor'] * df['observed_storage_mwh'] * 1000.
# recalculate number of adopters using anecdotal values
df['number_of_adopters'] = np.where(df['sector_abbr'] == 'res', df['system_kw_cum']/5.0, df['system_kw_cum']/100.0)
# recalculate market share
df['market_share'] = np.where(df['developable_agent_weight'] == 0, 0.0,
df['number_of_adopters'] / df['developable_agent_weight'])
df['market_share'] = df['market_share'].astype(np.float64)
df.drop(['agent_count',
'state_solar_kw_cum','state_batt_kw_cum','state_batt_kwh_cum',
'observed_solar_mw','observed_storage_mw','observed_storage_mwh',
'solar_scale_factor','batt_mw_scale_factor','batt_mwh_scale_factor'], axis=1, inplace=True)
market_last_year = df[['agent_id',
'market_share','max_market_share','number_of_adopters',
'market_value','initial_number_of_adopters','initial_pv_kw','initial_batt_kw','initial_batt_kwh',
'initial_market_share','initial_market_value',
'system_kw_cum','new_system_kw',
'batt_kw_cum','new_batt_kw',
'batt_kwh_cum','new_batt_kwh']]
market_last_year.rename(columns={'market_share':'market_share_last_year',
'max_market_share':'max_market_share_last_year',
'number_of_adopters':'adopters_cum_last_year',
'market_value': 'market_value_last_year',
'system_kw_cum':'system_kw_cum_last_year',
'batt_kw_cum':'batt_kw_cum_last_year',
'batt_kwh_cum':'batt_kwh_cum_last_year'}, inplace=True)
return df, market_last_year
#=============================================================================
# ^^^^ Diffusion Calculator ^^^^
@decorators.fn_timer(logger = logger, tab_level = 3, prefix = '')
def calc_diffusion(df, cur, con, techs, choose_tech, sectors, schema, is_first_year,
bass_params, override_p_value = None, override_q_value = None, override_teq_yr1_value = None):
"""
Calculates the market share (ms) added in the solve year. Market share must be less
than max market share (mms) except initial ms is greater than the calculated mms.
For this circumstance, no diffusion allowed until mms > ms. Also, do not allow ms to
decrease if economics deterioriate. Using the calculated
market share, relevant quantities are updated.
IN: df - pd dataframe - Main dataframe
OUT: df - pd dataframe - Main dataframe
market_last_year - pd dataframe - market to inform diffusion in next year
"""
logger.info("\t\tCalculating Diffusion")
# set p/q/teq_yr1 params
df = set_bass_param(df, bass_params, override_p_value, override_q_value, override_teq_yr1_value)
# calc diffusion market share
df = calc_diffusion_market_share(df, is_first_year)
# ensure no diffusion for non-selected options
df['diffusion_market_share'] = df['diffusion_market_share'] * df['selected_option']
# market share floor is based on last year's market share
df['market_share'] = np.maximum(df['diffusion_market_share'], df['market_share_last_year'])
# if in tech choice mode, ensure that total market share doesn't exceed 1
if choose_tech == True:
# extract out the rows for unselected technologies
market_share_cap = df[df['selected_option'] == False][['county_id', 'bin_id', 'sector_abbr', 'market_share']].groupby(['county_id', 'bin_id', 'sector_abbr']).sum().reset_index()
# determine how much market share is allowable based on 1 - the MS of the unselected techs
market_share_cap['market_share_cap'] = 1 - market_share_cap['market_share']
# drop the market share column
market_share_cap.drop('market_share', inplace = True, axis = 1)
# merge to df
df = pd.merge(df, market_share_cap, how = 'left', on = ['county_id', 'bin_id', 'sector_abbr'])
# cap the market share (for the selected option only)
df['market_share'] = np.where(df['selected_option'] == True, np.minimum(df['market_share'], df['market_share_cap']), df['market_share'])
# drop the market share cap field
df.drop('market_share_cap', inplace = True, axis = 1)
# calculate the "new" market share (old - current)
df['new_market_share'] = df['market_share'] - df['market_share_last_year']
# cap the new_market_share where the market share exceeds the max market share
    df['new_market_share'] = np.where(df['market_share'] > df['max_market_share'], 0, df['new_market_share'])
import numpy as np
import cvxpy as cvx
import util
def set_contains_array(S, a):
"""
:param S: list of np.ndarray
:param a: np.ndarray
:return: contains, 0 or 1
"""
contains = 0
for b in S:
if not (a - b).any(): # if a contained in S
contains = 1
return contains
def set_sum_two(A, B):
"""
:param A: list of np.ndarray
:param B: list of np.ndarray
:return: list of np.ndarray
"""
C = []
for a in A:
for b in B:
if not set_contains_array(C, a + b):
C.append(a + b)
return C
def set_sum_list(Omega):
"""
Set sum of multiple set of np.ndarray
:param Omega: list of list of np.ndarray
:return: list of np.ndarray
"""
S = Omega[0]
# print 'len(Omega) =', len(Omega)
# print 0, 'S =', S
for i in range(1, len(Omega)):
# print i, 'Omega[i] =',Omega[i]
S = set_sum_two(S, Omega[i])
# print i, 'S =', S
return S
def pointwise_dominate(w, U):
"""
Test if w is point-wise dominated by all u in U
:param w: np.ndarray
:param U: list of np.ndarray
:return:
"""
for u in U:
if np.all(w < u):
return True
return False
def lp_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With LP in White & Clark
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
# print("LP dominate")
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
constraints = [b.T*(w-u) >= d for u in U] + [np.sum(b) == 1]
prob = cvx.Problem(objective, constraints)
result = prob.solve()
# print("d =", d.value)
if d.value >= 0:
return np.ravel(b.value)
else:
return None
def dec_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With Bender's decomposition (Walraven & Spaan, 2017)
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
constraints = [np.sum(b) == 1]
b_ = np.random.random(S)
b_ = b_ / np.sum(b_)
U_ = []
while 1:
_b = b_
u_ = U[np.argmin([np.dot((w - U[i]), _b) for i in range(len(U))])]
constraints += [d <= b.T*(w-u_)]
U_.append(u_)
prob = cvx.Problem(objective, constraints)
_ = prob.solve()
b_ = np.ravel(b.value)
if not (b_ - _b).any():
break
if d.value >= 0:
return _b
else:
return None
def lex_less(u, w):
if w is None:
return False
for i in range(len(u)):
if u[i] > w[i]:
return False
return True
def best_point(b, U):
# print("Find best")
_max = -np.inf
w = None
for i in range(len(U)):
u = U[i]
# print("b", b)
# print("u", u)
x = np.dot(b, u)
# print("x", x)
if x > _max or (x == _max and lex_less(u, U[w])):
w = i
_max = x
# print("max", _max)
return w
def prune(W, A=None):
# print("prune", W)
D, E = [], []
while len(W) > 0:
w = W[-1]
if pointwise_dominate(w, D):
W.pop()
else:
# b = lp_dominate(w, D)
b = dec_dominate(w, D)
if b is None:
W.pop()
else:
i = best_point(b, W)
D.append(W[i])
if A is not None:
E.append(A[i])
W.pop(i)
if A is not None:
return D, E
else:
return D
def set_union(V):
V_ = []
for v in V:
V_ += v
return V_
class POMDP:
def __init__(self, P=None, Z=None, R=None, g=None, alpha=1.0):
self.P = P # m x n x n: a(t)->s(t)->s(t+1)
self.Z = Z # m x n x k: a(t)->s(t+1)->o(t+1)
self.R = R # m x n x n: a(t)->s(t+1)->s(t+1)
self.g = g # n x 1: s(T)
self.alpha = alpha # discount factor
self.nActions = self.Z.shape[0] # m
self.nStates = self.Z.shape[1] # n
self.nLevels = self.Z.shape[2] # k
if g is None:
self.g = np.zeros(self.nStates)
# print self.nActions, self.nStates, self.nLevels
def update_belief(self, b, a, o):
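        # Bayes filter step: b'(s') is proportional to Z[a, s', o] * sum_s P[a, s, s'] * b(s),
        # normalised to sum to one.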
p = self.Z[a, :, o] * self.P[a].T.dot(b)
return p / p.sum()
def monahan_enumeration(self, V):
"""construct the set of Omega
:param V: input list of alpha vectors
"""
V_, A_ = [], []
for a in range(self.nActions):
# print("Action", a)
Va = []
_r = np.sum(self.P[a] * self.R[a], axis=1) / self.nLevels
# print("_r:", _r)
for z in range(self.nLevels):
# print("Obs", z)
Vaz = [_r + self.alpha * (self.Z[a,:,z] * v).dot(self.P[a]) for v in V]
# print("Vaz", Vaz)
if len(Va) > 0:
Va = prune(set_sum_two(Va, Vaz)) # incremental pruning
else:
Va = Vaz
A_ += [a for _ in Va]
V_ += Va
V_, A_ = prune(V_, A_)
return V_, A_
def transition(self, a, s):
return np.random.choice(self.nStates, p=self.P[a, s])
def emmission(self, a, s):
        return np.random.choice(self.nLevels, p=self.Z[a, s])
@staticmethod
def optimal_action(b, V, A):
assert len(V) == len(A)
values = [np.dot(b, v) for v in V]
opt_idx = np.argmax(values)
return A[opt_idx], V[opt_idx]
def solve(self, T):
        V = [self.g]
Values = [None for _ in range(T)] + [[self.g]]
Actions = [None for _ in range(T)]
for t in range(T):
V, A = self.monahan_enumeration(V)
Values[T-1-t] = V
Actions[T-1-t] = A
return Values, Actions
def plan(self, T, initial_belief=None, perform=False):
        V = [self.g]
if initial_belief is None:
initial_belief = np.ones(self.nStates) / self.nStates
b = initial_belief
Values = [None for _ in range(T)] + [[self.g]]
Actions = [None for _ in range(T)]
for t in range(T):
V, A = self.monahan_enumeration(V)
Values[T - 1 - t] = V
Actions[T - 1 - t] = A
a0, v0 = self.optimal_action(b, Values[0], Actions[0])
if not perform:
return a0, v0
s = np.random.choice(self.nStates, p=b)
actions, states, observations, reward = [], [], [], 0.0
for t in range(T):
a, v = self.optimal_action(b, Values[t], Actions[t])
# print('a', a)
# print('v', v)
_s = s
s = self.transition(a, s)
            o = self.emmission(a, s)
b = self.update_belief(b, a, o)
states.append(_s)
            actions.append(a)
observations.append(o)
reward += self.R[a, _s, s] * self.alpha ** t
return a0, v0, actions, states, observations, reward
def test_pomdp(nActions, nStates, nLevels, alpha):
# P = np.array([
# [[0.25, 0.75], [0.6 , 0.4 ]],
# [[0.5 , 0.5 ], [0.7 , 0.3 ]]])
# Z = np.array([
# [[0.55, 0.45], [0.3 , 0.7 ]],
# [[0.65, 0.35], [0.25, 0.75]]])
# R = np.array([
# [[2., 2. ], [ 0., 0.]],
# [[3., 3. ], [-1., -1.]]])
# g = np.array([2., -1.])
P = util.normalize(np.random.random(size=(nActions, nStates, nStates)), axis=2)
Z = util.normalize(np.random.random(size=(nActions, nStates, nLevels)), axis=2)
    R = util.normalize(np.random.random(size=(nActions, nStates, nStates)))
import argparse
import gc
import json
import os
import pickle
import pprint
import sys
import time
from datetime import datetime
from timeit import default_timer as timer
from pathlib import Path
from pdb import set_trace as TT
from random import randint
import cv2
from typing import Tuple
import gym
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import psutil
import ray
import scipy
import torch as th
import torch.nn.functional as F
from gym import envs
from numba import njit
from qdpy.phenotype import Fitness, Features
from ribs.archives import GridArchive
from ribs.archives._add_status import AddStatus
from ribs.emitters import (
GradientImprovementEmitter,
ImprovementEmitter,
OptimizingEmitter,
)
from ribs.emitters.opt import CMAEvolutionStrategy
from ribs.optimizers import Optimizer
from ribs.visualize import grid_archive_heatmap
from torch import ByteTensor, Tensor, nn
from torch.nn import Conv2d, CrossEntropyLoss, Linear
from torch.utils.tensorboard import SummaryWriter
import deap
import deap.tools
import deap.algorithms
import qdpy
from qdpy import algorithms, containers, benchmarks, plots, tools
from deap.base import Toolbox
import graphviz
import warnings
import copy
# Use for .py file
from tqdm import tqdm
import gym_pcgrl
from evo_args import get_args
from gym_pcgrl.envs.helper import get_int_prob, get_string_map
# from example_play_call import random_player
# gvgai_path = '/home/sme/GVGAI_GYM/'
# sys.path.insert(0,gvgai_path)
# from play import play
# Use for notebook
# from tqdm.notebook import tqdm
# Use print to confirm access to local pcgrl gym
# print([env.id for env in envs.registry.all() if "gym_pcgrl" in env.entry_point])
"""
/// Required Environment ///
conda create -n ribs-pt python=3.7
pip install scipy==1.2.0 # must use this version with GVGAI_GYM
conda install -c conda-forge notebook
conda install pytorch torchvision torchaudio -c pytorch
conda install tensorboard
pip install 'ribs[all]' gym~=0.17.0 Box2D~=2.3.10 tqdm
git clone https://github.com/amidos2006/gym-pcgrl.git
cd gym-pcgrl # Must run in project root folder for access to pcgrl modules
/// Instructions ///
To start TensorBoard run the following command:
$ tensorboard --logdir=runs
Then go to:
http://localhost:6006
/// Resources ///
Sam's example code:
https://github.com/smearle/gol-cmame/blob/master/gol_cmame.py
PCGRL Repo:
https://github.com/amidos2006/gym-pcgrl
Neural CA Paper:
https://arxiv.org/pdf/2009.01398.pdf
RIBS examples:
https://docs.pyribs.org/en/stable/tutorials/lunar_lander.html
"""
TARGETS_PENALTY_WEIGHT = 10
def draw_net(config: object, genome: object, view: object = False, filename: object = None, node_names: object = None, show_disabled: object = True,
prune_unused: object = False,
node_colors: object = None, fmt: object = 'svg') -> object:
""" Receives a genome and draws a neural network with arbitrary topology. """
# Attributes for network nodes.
if graphviz is None:
warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
return
if node_names is None:
node_names = {}
assert type(node_names) is dict
if node_colors is None:
node_colors = {}
assert type(node_colors) is dict
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
inputs = set()
for k in config.genome_config.input_keys:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(k, 'lightgray')}
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in config.genome_config.output_keys:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled', 'fillcolor': node_colors.get(k, 'lightblue')}
dot.node(name, _attributes=node_attrs)
if prune_unused:
connections = set()
for cg in genome.connections.values():
if cg.enabled or show_disabled:
connections.add(cg.key)
used_nodes = copy.copy(outputs)
pending = copy.copy(outputs)
while pending:
new_pending = set()
for a, b in connections:
if b in pending and a not in used_nodes:
new_pending.add(a)
used_nodes.add(a)
pending = new_pending
else:
used_nodes = set(genome.nodes.keys())
for n in used_nodes:
if n in inputs or n in outputs:
continue
attrs = {'style': 'filled', 'fillcolor': node_colors.get(n, 'white')}
dot.node(str(n), _attributes=attrs)
for cg in genome.connections.values():
if cg.enabled or show_disabled:
#if cg.input not in used_nodes or cg.output not in used_nodes:
# continue
input, output = cg.key
a = node_names.get(input, str(input))
b = node_names.get(output, str(output))
style = 'solid' if cg.enabled else 'dotted'
color = 'green' if cg.weight > 0 else 'red'
width = str(0.1 + abs(cg.weight / 5.0))
dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
dot.render(filename, view=view)
return dot
def save_level_frames(level_frames, model_name):
renders_dir = os.path.join(SAVE_PATH, "renders")
if not os.path.isdir(renders_dir):
os.mkdir(renders_dir)
model_dir = os.path.join(renders_dir, "model_{}".format(model_name))
if not os.path.isdir(model_dir):
os.mkdir(model_dir)
for j, im in enumerate(level_frames):
im.save(
os.path.join(
model_dir, "frame_{:0>4d}.png".format(j)
)
)
def get_qd_score(archive, env, bc_names):
max_loss = env.get_max_loss(ctrl_metrics=bc_names)
max_loss = max_loss * TARGETS_PENALTY_WEIGHT
if ALGO == 'ME':
# qd_score = archive.qd_score() # we need to specify lower *and upper* bounds for this
# TODO: work out max diversity bonus to make this possible ?? Would this bias scores between n. latent seeds
# though?
qd_score = np.nansum(archive.quality_array + max_loss)
else:
df = archive.as_pandas(include_solutions=False)
qd_score = (df['objective'] + max_loss).sum()
return qd_score
def save_train_stats(objs, archive, env, bc_names, itr=None):
train_time_stats = {
"qd_score": get_qd_score(archive, env, bc_names),
"objective": get_stats(objs),
}
if itr is not None:
save_path = os.path.join(SAVE_PATH, "checkpoint_{}".format(itr))
else:
save_path = SAVE_PATH
json.dump(
train_time_stats,
open(os.path.join(save_path, "train_time_stats.json"), "w"),
indent=4,
)
def get_stats(stats):
"""Take 1D numpy array of data and return some fun facts in the form of a dictionary."""
return {
"mean": np.nanmean(stats),
"std": np.nanstd(stats),
"max": np.nanmax(stats),
"min": np.nanmin(stats),
}
def save_grid(csv_name="levels", d=4):
fontsize = 32
if "zelda" in PROBLEM:
d = 3
fontsize = int(fontsize * d / 4)
elif "smb" in PROBLEM:
d = 4
if CMAES:
# TODO: implement me
return
# save grid using csv file
# get path to CSV
levels_path = os.path.join(SAVE_PATH, csv_name + ".csv")
# get env name
env_name = "{}-{}-v0".format(PROBLEM, REPRESENTATION)
# create env
env = gym.make(env_name)
map_width = env._prob._width
df = pd.read_csv(levels_path, header=0, skipinitialspace=True)
# .rename(
# index=str,
# header=0,
# columns={
# 0: "level",
# 1: "batch_reward",
# 2: "variance",
# 3: "diversity",
# 4: "targets",
# },
# )
bc_names = []
for i in range(5, 7): # assume 2 BCs
bc_names.append(df.columns[i])
# look for the most valid levels
targets_thresh = 0.0
og_df = df
df = og_df[og_df['targets'] == targets_thresh]
last_len = len(df)
while len(df) < d**2 and targets_thresh > og_df['targets'].min():
last_len = len(df)
# Raise the threshold so it includes at least one more individual
targets_thresh = og_df[og_df['targets'] < targets_thresh]['targets'].max()
df = og_df[og_df['targets'] >= targets_thresh]
# d = 6 # dimension of rows and columns
figw, figh = 16.0, 16.0
fig = plt.figure()
fig, axs = plt.subplots(ncols=d, nrows=d, figsize=(figw, figh))
df_g = df.sort_values(by=bc_names, ascending=False)
df_g["row"] = np.floor(np.linspace(0, d, len(df_g), endpoint=False)).astype(int)
for row_num in range(d):
row = df_g[df_g["row"] == row_num]
row = row.sort_values(by=[bc_names[1]], ascending=True)
row["col"] = np.arange(0, len(row), dtype=int)
idx = np.floor(np.linspace(0, len(row) - 1, d)).astype(int)
row = row[row["col"].isin(idx)]
row = row.drop(["row", "col"], axis=1)
# grid_models = np.array(row.loc[:,'solution_0':])
grid_models = row["level"].tolist()
for col_num in range(len(row)):
axs[row_num, col_num].set_axis_off()
level = np.zeros((map_width, map_width), dtype=int)
for i, l_rows in enumerate(grid_models[col_num].split("], [")):
for j, l_col in enumerate(l_rows.split(",")):
level[i, j] = int(
l_col.replace("[", "").replace("]", "").replace(" ", "")
)
# Set map
env._rep._x = env._rep._y = 0
env._rep._map = level
img = env.render(mode="rgb_array")
# axs[row_num, col_num].imshow(img, aspect="auto")
axs[-col_num-1, -row_num-1].imshow(img, aspect="auto")
fig.subplots_adjust(hspace=0.01, wspace=0.01)
levels_png_path = os.path.join(SAVE_PATH, "{}_grid.png".format(csv_name))
fig.text(0.5, 0.01, bc_names[0], ha='center', va='center',fontsize=fontsize)
fig.text(0.01, 0.5, bc_names[1], ha='center', va='center', rotation='vertical', fontsize=fontsize)
plt.tight_layout(rect=[0.025, 0.025, 1, 1])
fig.savefig(levels_png_path, dpi=300)
plt.close()
def auto_garbage_collect(pct=80.0):
if psutil.virtual_memory().percent >= pct:
gc.collect()
def tran_action(action, **kwargs):
skip = False
# return action, skip
return action.swapaxes(1, 2), skip
# usually, if action does not turn out to change the map, then the episode is terminated
# the skip boolean tells us whether, for some representation-specific reason, the agent has chosen not to act, but
# without ending the episode
@njit
def id_action(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
# the argmax along tile_type dimension is performed inside the representation's update function
skip = False
return action, skip
# @njit
def wide_action(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
# only consider tiles where the generator suggests something different than the existing tile
act_mask = action.argmax(axis=0) != int_map
n_new_builds = np.sum(act_mask)
act_mask = act_mask.reshape((1, *act_mask.shape))
# action = action * act_mask
action = np.where(act_mask == False, action.min() - 10, action)
coords = np.unravel_index(action.argmax(), action.shape)
if n_new_builds > 0:
assert act_mask[0, coords[1], coords[2]] == 1
coords = coords[2], coords[1], coords[0]
# assert int_map[coords[0], coords[1]] != coords[2]
skip = False
return coords, skip
@njit
def narrow_action(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
act = action[:, y, x].argmax()
if act == 0:
skip = True
else:
skip = False
return act, skip
@njit
def turtle_action(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
act = action[:, y, x].argmax()
# moving is counted as a skip, so lack of change does not end episode
if act < n_dirs:
skip = True
else:
skip = False
return act, skip
@njit
def flat_to_box(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
action = action.reshape((n_tiles, *int_map.shape))
skip = False
return action, skip
@njit
def flat_to_wide(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
w = int_map.shape[0]
h = int_map.shape[1]
assert len(action) == int_map.shape[0] + int_map.shape[1] + n_tiles
action = (action[:w].argmax(), action[w : w + h].argmax(), action[w + h :].argmax())
skip = False
return action, skip
@njit
def flat_to_narrow(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
act = action.argmax()
if act == 0:
skip = True
else:
skip = False
return act, skip
@njit
def flat_to_turtle(action, int_map=None, n_tiles=None, x=None, y=None, n_dirs=None):
act = action.argmax()
if act < n_dirs:
skip = True
else:
skip = False
return act, skip
preprocess_action_funcs = {
"NCA": {
"cellular": id_action,
"wide": wide_action,
"narrow": narrow_action,
"turtle": turtle_action,
},
"CPPN": {
"cellular": tran_action,
},
"CNN": {
# will try to build this logic into the model
"cellular": flat_to_box,
"wide": flat_to_wide,
"narrow": flat_to_narrow,
"turtle": flat_to_turtle,
},
}
def id_observation(obs, **kwargs):
return obs
def local_observation(obs, **kwargs):
x = kwargs.get("x")
y = kwargs.get("y")
    local_obs = np.zeros((1, obs.shape[1], obs.shape[2]))
import numpy as np
from ... import draw
from ... import math
class SpherePoints(draw.SpherePoints):
__doc__ = draw.SpherePoints.__doc__
def render(self, axes, rotation=(1, 0, 0, 0), size=(2, 2), pixel_scale=64,
zoom=1, **kwargs):
rotation = np.asarray(rotation, dtype=np.float32)
rotated_points = math.quatrot(self.rotation[np.newaxis], self.points)
if self.on_surface:
rotated_points /= np.linalg.norm(rotated_points, axis=-1, keepdims=True)
rotated_points[np.isnan(rotated_points)] = 0
size = np.asarray(size, dtype=np.float32)/zoom
        gridsize = int(np.max(np.asarray(size)))
"""Utilities related to reading water cluster networks from disk"""
from typing import Dict
from ase import data
import tensorflow as tf
import networkx as nx
import numpy as np
def infer_water_cluster_bonds(atoms):
"""
Infers the covalent and hydrogen bonds between oxygen and hydrogen atoms in a water cluster.
Definition of a hydrogen bond obtained from https://aip.scitation.org/doi/10.1063/1.2742385
Args:
atoms (ase.Atoms): ASE atoms structure of the water cluster. Atoms list must be ordered
such that the two covalently bound hydrogens directly follow their oxygen.
Returns:
cov_bonds ([(str, str, 'covalent')]): List of all covalent bonds
h_bonds [(str, str, 'hydrogen')]: List of all hydrogen bonds
"""
# Make sure the atoms are in the right order
z = atoms.get_atomic_numbers()
assert z[:2].tolist() == [8, 1], "Atom list not in (O, H, H) format"
coords = atoms.positions
# Get the covalent bonds
# Note: Assumes that each O is followed by 2 covalently-bonded H atoms
cov_bonds = [(i, i + 1, 'covalent') for i in range(0, len(atoms), 3)]
cov_bonds.extend([(i, i + 2, 'covalent') for i in range(0, len(atoms), 3)])
# Get the hydrogen bonds
# Start by getting the normal to each water molecule
q_1_2 = []
for i in range(0, len(atoms), 3):
h1 = coords[i + 1, :]
h2 = coords[i + 2, :]
o = coords[i, :]
q_1_2.append([h1 - o, h2 - o])
v_list = [np.cross(q1, q2) for (q1, q2) in q_1_2]
# Determine which (O, H) pairs are bonded
h_bonds = []
for idx, v in enumerate(v_list): # Loop over each water molecule
for index, both_roh in enumerate(q_1_2): # Loop over each hydrogen
for h_index, roh in enumerate(both_roh):
# Get the index of the H and O atoms being bonded
indexO = 3 * idx
indexH = 3 * index + h_index + 1
# Get the coordinates of the two atoms
h_hbond = coords[indexH, :]
o_hbond = coords[indexO, :]
# Compute whether they are bonded
dist = np.linalg.norm(h_hbond - o_hbond)
if (dist > 1) & (dist < 2.8):
angle = np.arccos(np.dot(roh, v) / (np.linalg.norm(roh) * np.linalg.norm(v))) * (180.0 / np.pi)
if angle > 90.0:
angle = 180.0 - angle
N = np.exp(-np.linalg.norm(dist) / 0.343) * (7.1 - (0.05 * angle) + (0.00021 * (angle ** 2)))
if N >= 0.0085:
h_bonds.append((indexO, indexH, 'hydrogen'))
return cov_bonds, h_bonds
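# Example usage (a sketch; assumes ASE's built-in g2 'H2O' geometry, which lists the
# atoms in the required (O, H, H) order):
#
#   from ase.build import molecule
#   cov_bonds, h_bonds = infer_water_cluster_bonds(molecule('H2O'))
#   # a single molecule has two covalent O-H bonds and no hydrogen bonds:
#   # cov_bonds == [(0, 1, 'covalent'), (0, 2, 'covalent')], h_bonds == []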
def create_graph(atoms):
"""
Given a ASE atoms object, this function returns a graph structure with following properties.
1) Each graph has two graph-level attributes: actual_energy and predicted_energy
2) Each node represents an atom and has two attributes: label ('O'/'H' for oxygen and hydrogen) and 3-dimensional
coordinates.
3) Each edge represents a bond between two atoms and has two attributes: label (covalent or hydrogen) and distance.
Args:
atoms (Atoms): ASE atoms object
Returns:
(nx.Graph) Networkx representation of the water cluster
"""
# Compute the bonds
cov_bonds, h_bonds = infer_water_cluster_bonds(atoms)
# Add nodes to the graph
graph = nx.Graph()
for i, (coord, Z) in enumerate(zip(atoms.positions, atoms.get_atomic_numbers())):
graph.add_node(i, label=data.chemical_symbols[Z], coords=coord)
# Add the edges
edges = cov_bonds + h_bonds
for a1, a2, btype in edges:
        distance = np.linalg.norm(atoms.positions[a1, :] - atoms.positions[a2, :])
# License: BSD 3 clause
import gc
import unittest
import weakref
import numpy as np
import scipy
from scipy.sparse import csr_matrix
from tick.array.build.array import tick_double_sparse2d_from_file
from tick.array.build.array import tick_double_sparse2d_to_file
from tick.array_test.build import array_test as test
class Test(unittest.TestCase):
def test_varray_smart_pointer_in_cpp(self):
"""...Test C++ reference counter
"""
vcc = test.VarrayContainer()
self.assertEqual(vcc.nRef(), 0)
vcc.initVarray()
self.assertEqual(vcc.nRef(), 1)
cu1 = test.VarrayUser()
cu1.setArray(vcc)
self.assertEqual(vcc.nRef(), 2)
cu1.setArray(vcc)
self.assertEqual(vcc.nRef(), 2)
cu2 = test.VarrayUser()
cu2.setArray(vcc)
self.assertEqual(vcc.nRef(), 3)
del cu1
self.assertEqual(vcc.nRef(), 2)
cu3 = test.VarrayUser()
cu3.setArray(vcc)
self.assertEqual(vcc.nRef(), 3)
del cu3, cu2
self.assertEqual(vcc.nRef(), 1)
# we cannot check it will go to 0 after vcc deletion in Python
cu4 = test.VarrayUser()
cu4.setArray(vcc)
self.assertEqual(vcc.nRef(), 2)
del vcc
self.assertEqual(cu4.nRef(), 1)
# we cannot check it will go to 0 after cu4 deletion in Python
del cu4
def test_varray_smart_pointer_deletion1(self):
"""...Test that varray is still alive after deletion in Python
"""
vcc = test.VarrayContainer()
vcc.initVarray()
# Now mix with some Python
a = vcc.varrayPtr
# This does not increment C++ reference counter
self.assertEqual(vcc.nRef(), 1)
# Get a weak ref of the array
r = weakref.ref(a)
del a
np.testing.assert_array_almost_equal(r(), vcc.varrayPtr)
del vcc
self.assertIsNone(r())
def test_varray_smart_pointer_deletion2(self):
"""...Test that base is deleted after a double assignment in Python
"""
vcc = test.VarrayContainer()
vcc.initVarray()
a = vcc.varrayPtr
b = vcc.varrayPtr
r = weakref.ref(b)
del a, vcc, b
self.assertIsNone(r())
def test_varray_smart_pointer_deletion3(self):
"""...Test that base is deleted after a double assignment in Python
"""
vcc = test.VarrayContainer()
vcc.initVarray()
# Now mix with some Python
a = vcc.varrayPtr
a_sum = np.sum(a)
# This does not increment C++ reference counter
self.assertEqual(vcc.nRef(), 1)
# Get a weak ref of the array
r = weakref.ref(vcc.varrayPtr)
del vcc
        np.testing.assert_array_almost_equal(a_sum, np.sum(a))
#!/usr/local/sci/bin/python
# PYTHON2.7
#
# Author: <NAME>
# Created: 7 April 2016
# Last update: 7 April 2016
# Location: /data/local/hadkw/HADCRUH2/MARINE/EUSTACEMDS/EUSTACE_SST_MAT/
# GitHub: https://github.com/Kate-Willett/HadISDH_Marine_Build/
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code provides tools to read new_suite files and write new_suite*extended and new_suite*uncertainty files.
# It reads the files into a dictionary where each column can be explored through its 'key'.
#
# -----------------------
# LIST OF MODULES
# -----------------------
# inbuilt:
# import numpy as np
# import copy
# import sys, os
# import pdb # pdb.set_trace() or c
#
# Kates:
#
# -----------------------
# DATA
# -----------------------
# /project/hadobs2/hadisdh/marine/ICOADS.2.5.1/*/new_suite_197312_ERAclimNBC.txt
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# python2.7
# import MDS_RWtools as MDStool
#
# MDSdict=MDStool.ReadMDSstandard('year', 'month', 'type')
# year='1973' # string
# month='01' # string
# type='ERAclimNBC' # which iteration of output?
#
# MDSdict=MDStool.ReadMDSextended('year', 'month', 'type')
# year='1973' # string
# month='01' # string
# type='ERAclimBC' # which iteration of output?
#
# MDSdict=MDStool.ReadMDSuncertainty('year', 'month', 'type')
# year='1973' # string
# month='01' # string
# type='ERAclimBC' # which iteration of output?
#
# Writing is slightly more complex
# Can't really think where this one would be used, but it is included just in case
# MDStool.WriteMDSstandard('year', 'month', 'type',MDSDict)
# year='1973' # string
# month='01' # string
# type='ERAclimNBC' # which iteration of output - should also be the name of the directory the file sits in so the program can figure out the filename and path
# MDSDict = {} # A dictionary created by MakeStdDict()
#
# Writing is slightly more complex
# MDStool.WriteMDSextended('year', 'month', 'type',MDSDict)
# year='1973' # string
# month='01' # string
# type='ERAclimBC' # which iteration of output - should also be the name of the directory the file sits in so the program can figure out the filename and path
# MDSDict = {} # A dictionary created by MakeExtDict()
#
# MDStool.WriteMDSuncertainty('year', 'month', 'type',MDSDict)
# year='1973' # string
# month='01' # string
# type='ERAclimBC' # which iteration of output - should also be the name of the directory the file sits in so the program can figure out the filename and path
# MDSDict = {} # A dictionary created by MakeUncDict()
#
# MDSDict=MDStool.MakeStdDict()
#
# MDSDict=MDStool.MakeExtDict()
#
# MDSDict=MDStool.MakeUncDict()
#
#
#
# For reading, this runs the code and stops mid-process so you can then interact with the
# data. You should be able to call this from another program too.
#
# -----------------------
# OUTPUT
# -----------------------
# a dictionary to play with
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 2 (25 June 2019)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
# For some reason this stopped working once I moved to RHEL7, even though it's still python2.7
# This appears to be something to do with reading in the type for each element. So now I read everything in as strings and convert later
#
#
# Version 1 (7 April 2016)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
#
#************************************************************************
# START
#************************************************************************
import numpy as np
import sys, os
import copy
import struct
import pdb # pdb.set_trace() or c
# first element is 9 characters long with a space - so delimiters = 10.
#TheTypesStd=("|S9","|S8","int","int","int","int","int","int",
# "int","int","int","int","int",
# "int","int","int","int","int","int","int","int","int","int","int","int",
# "int","int","int","int","int","|S8",
# "int","int","int","int","int","int","int","int","int","int","int",
# "int","|S3","|S4","|S4","|S3","|S2","|S3","int","int","int","int","int","int",
# "int","int","int","int","int","int","int","int","int",
# "int","int","int","int","int","int","int","int","int",
# "int","int","int","int","int","int","int","int","int",
# "int","int","int","int","int","int","int","int","int",
# "int","int","int","int","int","int","int","int")
TheTypesStd=("str","str","int","int","int","int","int","int",
"int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","str",
"int","int","int","int","int","int","int","int","int","int","int",
"int","str","str","str","str","str","str","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int")
TheDelimitersStd=(10,8,8,8,8,8,8,8, # 8 ID, Location and time metadata
8,8,8,8,8, # 5 Temperature and pressure OBS values AT, SST and SLP
8,8,8,8,8,8,8,8,8,8,8,8, # 12 Humidity related OBS values DPT, SHU, VAP, CRH, CWB and DPD
8,8,8,8,8,9, # 6 Deck and Platform ID and other platform related metadata
4,3,3,3,8,3,8,3,8,3,8, # 11 OBS related metadata
4,3,4,4,3,2,3,5,5,5,5,5,7, # 13 Instrument related metadata
2,1,1,1,1,1,1,1,1, # 9 BASE QC
2,1,1,1,1,1,1,1,1, # 9 SST QC
2,1,1,1,1,1,1,1,1, # 9 AT QC
2,1,1,1,1,1,1,1,1, # 9 DPT QC
2,1,1,1,1,1,1,1) # 8 Additional QC
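# A minimal sketch (an addition for illustration, not the module's own ReadData
# helper used below): np.genfromtxt accepts a sequence of field widths as its
# delimiter argument, so the fixed-width columns described by TheTypesStd and
# TheDelimitersStd can be pulled in directly.  Reading every column as a string
# first mirrors the RHEL7 workaround described above; numeric conversion is done
# afterwards, e.g. np.array(col, dtype='int')/100. as in ReadMDSstandard.
# The function name is illustrative only.
def SketchReadFixedWidth(TheFilee, TheWidths=TheDelimitersStd):
    return np.genfromtxt(TheFilee, dtype=str, delimiter=TheWidths)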
# first element is 9 characters long with a space - so delimiters = 10.
TheTypesExt=("|S9","|S8","int","int","int","int","int","int",
"int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int",
"|S3","|S3","|S3","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int",
"int","int","int","int","int",
"int","int","int","int","int",
"int","int","int","int","int","int")
TheDelimitersExt=(10,8,8,8,8,8,8,8,
8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,9,
3,3,3,5,5,5,5,5,5,
2,1,1,1,1,1,1,1,
2,1,1,1,1,
2,1,1,1,1,
2,1,1,1,1,1)
# first element is 9 characters long with a space - so delimiters = 10.
TheTypesUnc=("|S9","|S8","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int","int","int","int","int","int","int",
"int","int","int",
"|S3","|S3","|S3","int","int","int","int","int","int",
"int","int","int","int","int","int","int","int",
"int","int","int","int","int",
"int","int","int","int","int",
"int","int","int","int","int","int")
TheDelimitersUnc=(10,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,8,8,8,8,8,8,8,8,8,8,8,8,
8,8,9,
3,3,3,5,5,5,5,5,5,
2,1,1,1,1,1,1,1,
2,1,1,1,1,
2,1,1,1,1,
2,1,1,1,1,1)
#************************************************************************
# ReadMDSstandard
#************************************************************************
def ReadMDSstandard(TheYear,TheMonth,TheType):
# InDir = '/project/hadobs2/hadisdh/marine/ICOADS.2.5.1/'+TheType+'/' # THRESH5_5
InDir = '/project/hadobs2/hadisdh/marine/ICOADS.3.0.0/'+TheType+'/' # THRESH5_5
InFil = 'new_suite_'+TheYear+TheMonth+'_'+TheType+'.txt'
TheFilee = InDir+InFil
print(TheFilee)
# RD - moved the TheTypes and TheDelimiters to outside of definition
# so I can call them from another routine
RawData=ReadData(TheFilee,TheTypesStd,TheDelimitersStd)
MDSDict=dict([])
MDSDict['shipid'] = np.array(RawData[:,0],dtype=TheTypesStd[0])
MDSDict['UID'] = np.array(RawData[:,1],dtype=TheTypesStd[1])
MDSDict['LAT'] = np.array(RawData[:,2],dtype=TheTypesStd[2])/100.
MDSDict['LON'] = np.array(RawData[:,3],dtype=TheTypesStd[3])/100.
MDSDict['YR'] = np.array(RawData[:,4],dtype=TheTypesStd[4])
MDSDict['MO'] = np.array(RawData[:,5],dtype=TheTypesStd[5])
MDSDict['DY'] = np.array(RawData[:,6],dtype=TheTypesStd[6])
MDSDict['HR'] = np.array(RawData[:,7],dtype=TheTypesStd[7])
MDSDict['AT'] =np.array(RawData[:,8],dtype=TheTypesStd[8])/10.
MDSDict['ATA'] =np.array(RawData[:,9],dtype=TheTypesStd[9])/100.
MDSDict['SST'] =np.array(RawData[:,10],dtype=TheTypesStd[10])/10.
MDSDict['SSTA'] =np.array(RawData[:,11],dtype=TheTypesStd[11])/100.
MDSDict['SLP'] =np.array(RawData[:,12],dtype=TheTypesStd[12])/10.
MDSDict['DPT'] =np.array(RawData[:,13],dtype=TheTypesStd[13])/10.
MDSDict['DPTA'] =np.array(RawData[:,14],dtype=TheTypesStd[14])/100.
MDSDict['SHU'] =np.array(RawData[:,15],dtype=TheTypesStd[15])/10.
MDSDict['SHUA'] =np.array(RawData[:,16],dtype=TheTypesStd[16])/100.
MDSDict['VAP'] =np.array(RawData[:,17],dtype=TheTypesStd[17])/10.
MDSDict['VAPA'] =np.array(RawData[:,18],dtype=TheTypesStd[18])/100.
MDSDict['CRH'] =np.array(RawData[:,19],dtype=TheTypesStd[19])/10.
MDSDict['CRHA'] =np.array(RawData[:,20],dtype=TheTypesStd[20])/100.
MDSDict['CWB'] =np.array(RawData[:,21],dtype=TheTypesStd[21])/10.
MDSDict['CWBA'] =np.array(RawData[:,22],dtype=TheTypesStd[22])/100.
MDSDict['DPD'] =np.array(RawData[:,23],dtype=TheTypesStd[23])/10.
MDSDict['DPDA'] =np.array(RawData[:,24],dtype=TheTypesStd[24])/100.
# MDSDict['DSVS']=np.array(RawData['f25'])
MDSDict['DCK'] =np.array(RawData[:,26],dtype=TheTypesStd[26])
MDSDict['SID'] =np.array(RawData[:,27],dtype=TheTypesStd[27])
MDSDict['PT'] =np.array(RawData[:,28],dtype=TheTypesStd[28])
# MDSDict['SI']=np.array(RawData['f29'])
# MDSDict['printsim']=np.array(RawData['f30'])
MDSDict['II'] =np.array(RawData[:,31],dtype=TheTypesStd[31])
MDSDict['IT'] =np.array(RawData[:,32],dtype=TheTypesStd[32])
MDSDict['DPTI'] =np.array(RawData[:,33],dtype=TheTypesStd[33])
MDSDict['WBTI'] =np.array(RawData[:,34],dtype=TheTypesStd[34])
MDSDict['WBT'] =np.array(RawData[:,35],dtype=TheTypesStd[35])/10.
# MDSDict['DI']=np.array(RawData['f36'])
# MDSDict['D']=np.array(RawData['f37'])
MDSDict['WI'] =np.array(RawData[:,38],dtype=TheTypesStd[38])
MDSDict['W'] =np.array(RawData[:,39],dtype=TheTypesStd[39])/10.
# MDSDict['VI']=np.array(RawData['f40'])
# MDSDict['VV']=np.array(RawData['f41'])
# MDSDict['DUPS']=np.array(RawData['f42'])
# MDSDict['COR']=np.array(RawData['f43'])
MDSDict['TOB'] =np.array(RawData[:,44],dtype=TheTypesStd[44])
MDSDict['TOT'] =np.array(RawData[:,45],dtype=TheTypesStd[45])
MDSDict['EOT'] = | np.array(RawData[:,46],dtype=TheTypesStd[46]) | numpy.array |
"""
Functions for working with current polylines, e.g. for calculating the magnetic field and potentials as well as the inductance
"""
__all__ = [
"magnetic_field",
"mutual_inductance",
"scalar_potential",
"self_inductance",
"vector_potential",
]
import numpy as np
from .integrals import omega
import trimesh
def cross(r1, r2):
"""Cross product without overhead"""
result = np.zeros(r1.shape)
result[0] = r1[1] * r2[2] - r1[2] * r2[1]
result[1] = r1[2] * r2[0] - r1[0] * r2[2]
result[2] = r1[0] * r2[1] - r1[1] * r2[0]
return result
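# Small illustrative sanity check (an addition, not part of the original module):
# the hand-rolled cross() above should agree with numpy's own np.cross applied
# along the first axis of (3, N) arrays.
def _check_cross(n=5):
    np.random.seed(0)
    r1 = np.random.randn(3, n)
    r2 = np.random.randn(3, n)
    assert np.allclose(cross(r1, r2), np.cross(r1, r2, axis=0))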
def magnetic_field(vertices, points):
"""Compute B-field of a segmented line current.
    See: Compact expressions for the Biot–Savart fields of a filamentary segment
by Hanson & Hirshman: https://doi.org/10.1063/1.1507589
Parameters
----------
vertices: (N_line, 3) array
Vertices of the line with N_line-1 segments
The first and last vertices should be the same to close the loop.
points: (N_points, 3) array
Magnetic field evaluation points
Returns
-------
bfield: (N_points, 3) array
Magnetic field at evaluation points
"""
field = np.zeros(points.T.shape)
for i in range(len(vertices) - 1):
r1 = vertices[i]
r2 = vertices[i + 1]
# Vectors between vertices and field points
a1 = points.T - r1.reshape(3, 1)
a2 = points.T - r2.reshape(3, 1)
# Direction of the field
f = cross(a1, a2)
# Vector lengths
d1 = np.sqrt(np.sum(a1 ** 2, axis=0))
d2 = np.sqrt(np.sum(a2 ** 2, axis=0))
# Normalize direction field and divide by cylindrical distance
f *= (d1 + d2) / (d1 * d2 * (d1 * d2 + np.sum(a1 * a2, axis=0)))
field = field + f
return field.T * 1e-7
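# Quick illustrative check (an addition, not part of the original module): for a
# unit square loop carrying 1 A, the field magnitude at the centre should be
# mu_0 * I * 2 * sqrt(2) / (pi * a), i.e. 8 * sqrt(2) * 1e-7 T for a = 1 m.
def _example_square_loop_field():
    square = np.array(
        [
            [-0.5, -0.5, 0.0],
            [0.5, -0.5, 0.0],
            [0.5, 0.5, 0.0],
            [-0.5, 0.5, 0.0],
            [-0.5, -0.5, 0.0],  # repeat the first vertex to close the loop
        ]
    )
    b = magnetic_field(square, np.zeros((1, 3)))[0]
    assert np.isclose(np.linalg.norm(b), 8 * np.sqrt(2) * 1e-7, rtol=1e-6)
    return b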
def vector_potential(vertices, points, reg=1e-12, symmetrize=True):
"""Compute vector potential of a segmented line currents.
Based on straightforward integration of 1/r potential over a line
i.e. the gamma0 integral
    See: Compact expressions for the Biot–Savart fields of a filamentary segment
by Hanson & Hirshman: https://doi.org/10.1063/1.1507589
Parameters
----------
vertices: (N_line, 3) array
Vertices of the line with N_line-1 segments
The first and last vertices should be the same to close the loop.
points: (N_points, 3) array
Evaluation points
Returns
-------
A: array (Npoints, 3)
Vector potential
"""
segments = vertices[1:] - vertices[:-1]
RR = vertices[:, None, :] - points[None, :, :]
dotprods2 = np.sum(RR[1:] * segments[..., None, :], axis=-1)
dotprods1 = np.sum(RR[:-1] * segments[..., None, :], axis=-1)
ss = np.linalg.norm(segments, axis=-1)
segments /= ss[..., None]
rr = np.linalg.norm(RR, axis=-1)
    # Regularize s.t. neither the denominator nor the numerator can be zero
# Avoid numerical issues directly at the edge
res = np.log(
(rr[1:] * ss[..., None] + dotprods2 + reg)
/ (rr[:-1] * ss[..., None] + dotprods1 + reg)
)
# Symmetrize the result since on the negative extension of the edge
    # there's a division of two small values, resulting in numerical instabilities
# (also incompatible with adding the reg value)
if symmetrize:
res2 = -np.log(
(rr[1:] * ss[..., None] - dotprods2 + reg)
/ (rr[:-1] * ss[..., None] - dotprods1 + reg)
)
res = | np.where(dotprods1 + dotprods2 > 0, res, res2) | numpy.where |
import numpy as np
import argparse
from argparse import RawTextHelpFormatter
from windygridworld import WindyGridWorld
from agents import Agent
import matplotlib.pyplot as plt
lr = 0.7
episodes = 200
steps = 8000 #max number of steps before termination
epsilon = 0.05
numSeeds = 50
verbose = False
def runwindy(update, king = False, stochastic = False, rate=None, e=None):
data = np.zeros(episodes)
dataking = np.zeros(episodes)
for i in range(numSeeds):
np.random.seed(i)
windy = WindyGridWorld(king=king, stochastic = stochastic)
numStates = windy.numStates()
numActions = windy.numActions()
lrate = rate if rate is not None else lr
eps = e if e is not None else epsilon
agent = Agent(numStates, numActions, update = update, lr=lrate, epsilon= eps)
datum = run(agent, env = windy,
steps = steps, episodes=episodes, verbose = False)
data +=np.array(datum)
return np.cumsum(data/numSeeds)
def sarsa0(king=False, stochastic=False):
if(verbose and stochastic):
print("generating plot for stochastic world")
elif(verbose and king):
print("generating plot for king")
elif(verbose):
print("generating baseline plot")
update = "sarsa0"
if verbose:print(update)
x = runwindy(update, king=king, stochastic=stochastic)
y = np.arange(episodes)
plt.figure()
plt.plot(x,y, 'r')
plt.grid()
if verbose: print(x[-1],y[-1])
if(king): string = "king"
else: string = "baseline"
if(stochastic): string = "stochastic"
plt.title("sarsa(0): "+string)
plt.savefig("plots/"+string)
def versus_methods(king = False, stochastic = False):
if(verbose): print("running versus_methods")
updates = ["sarsa0","expected-sarsa","Q"]
plt.figure()
for update in updates:
if(verbose): print(update)
x = runwindy(update, king, stochastic)
y = | np.arange(episodes) | numpy.arange |
'''
Base code for 'bmi' feature (both spikes and field potentials) when using the plexon system
'''
import time
import numpy as np
from . import plexnet
from collections import Counter
import os
import array
try:
from config import config
PL_IP = config.plexon_ip
PL_PORT = int(config.plexon_port)
except:
PL_IP = "127.0.0.1" # default to localhost
PL_PORT = 6000
PL_ADDR = (PL_IP, PL_PORT)
PL_SingleWFType = 1
PL_ExtEventType = 4
PL_ADDataType = 5
from riglib.source import DataSourceSystem
class Spikes(DataSourceSystem):
'''
Client for spike data streamed from plexon system, compatible with riglib.source.DataSource
'''
update_freq = 40000
dtype = np.dtype([("ts", np.float), ("chan", np.int32), ("unit", np.int32), ("arrival_ts", np.float64)])
def __init__(self, addr=PL_ADDR, channels=None):
'''
Constructor for plexon.Spikes
Parameters
----------
addr: tuple of length 2
(IP address, UDP port)
channels: optional, default = None
list of channels (electrodes) from which to receive spike data
Returns
-------
Spikes instance
'''
self.conn = plexnet.Connection(*addr)
self.conn.connect(256, waveforms=False, analog=False)
try:
self.conn.select_spikes(channels)
except:
print("Cannot run select_spikes method; old system?")
def start(self):
'''
Connect to the plexon server and start receiving data
'''
self.conn.start_data()
        # self.data is a generator (self.conn.get_data() yields its data packets).
        # Calling 'next(self.data)' in the 'get' function pulls a new spike timestamp
self.data = self.conn.get_data()
def stop(self):
'''
Disconnect from the plexon server
'''
self.conn.stop_data()
self.conn.disconnect()
def get(self):
'''
Return a single spike timestamp/waveform. Must be polled continuously for additional spike data. The polling is automatically taken care of by riglib.source.DataSource
'''
d = next(self.data)
while d.type != PL_SingleWFType:
d = next(self.data)
return np.array([(d.ts / self.update_freq, d.chan, d.unit, d.arrival_ts)], dtype=self.dtype)
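# Minimal usage sketch (an addition, not part of the original module): outside of
# riglib's DataSource machinery the class can be polled directly.  Assumes a
# Plexon/PlexNet server is reachable at PL_ADDR; the channel list is a placeholder.
def _example_poll_spikes(n_events=10, channels=(1, 2, 3)):
    spikes = Spikes(channels=list(channels))
    spikes.start()
    events = [spikes.get() for _ in range(n_events)]  # each get() returns a 1-element record array
    spikes.stop()
    return np.concatenate(events)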
class LFP(DataSourceSystem):
'''
Client for local field potential data streamed from plexon system, compatible with riglib.source.MultiChanDataSource
'''
update_freq = 1000.
gain_digiamp = 1000.
gain_headstage = 1.
    # Like the Spikes class, dtype is the numpy data type of items that will go
    # into the (multi-channel, in this case) datasource's ringbuffer.
    # Unlike the Spikes class, the get method below does not return objects of
    # this type. This is because a potentially variable amount of LFP data is
    # returned in d.waveform every time next(self.data) is called.
dtype = np.dtype('float')
def __init__(self, addr=PL_ADDR, channels=None, chan_offset=512):
'''
Constructor for plexon.LFP
Parameters
----------
addr : tuple of length 2
(IP address, UDP port)
channels : optional, default = None
list of channels (electrodes) from which to receive spike data
chan_offset : int, optional, default=512
Indexing offset from the first LFP channel to the indexing system used by the OPX system
Returns
-------
plexon.LFP instance
'''
self.conn = plexnet.Connection(*addr)
self.conn.connect(256, waveforms=False, analog=True)
# for OPX system, field potential (FP) channels are numbered 513-768
self.chan_offset = chan_offset
channels_offset = [c + self.chan_offset for c in channels]
try:
self.conn.select_continuous(channels_offset)
except:
print("Cannot run select_continuous method")
def start(self):
'''
Connect to the plexon server and start receiving data
'''
self.conn.start_data()
self.data = self.conn.get_data()
def stop(self):
'''
Disconnect from the plexon server
'''
self.conn.stop_data()
self.conn.disconnect()
def get(self):
'''
        Get a new LFP sample/block of LFP samples from the plexon data stream
'''
d = next(self.data)
while d.type != PL_ADDataType:
d = next(self.data)
        # values arrive as signed 12-bit integers in the range [-2048, 2047]
# first convert to float
waveform = np.array(d.waveform, dtype='float')
# convert to units of mV
waveform = waveform * 16 * (5000. / 2**15) * (1./self.gain_digiamp) * (1./self.gain_headstage)
return (d.chan-self.chan_offset, waveform)
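# Illustrative check of the scaling used in LFP.get above (an addition, not part
# of the original module): a full-scale 12-bit count of 2047 is scaled by 16 into
# the 16-bit range, converted at 5000 mV per 2**15 counts, and divided by the
# amplifier gains, giving roughly 5 mV with the default gain_digiamp of 1000.
def _example_lfp_scaling(raw_count=2047, gain_digiamp=1000., gain_headstage=1.):
    return raw_count * 16 * (5000. / 2 ** 15) * (1. / gain_digiamp) * (1. / gain_headstage)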
class Aux(DataSourceSystem):
'''
Client for auxiliary analog data streamed from plexon system, compatible with riglib.source.MultiChanDataSource
'''
update_freq = 1000.
gain_digiamp = 1.
gain_headstage = 1.
# see comment above
dtype = np.dtype('float')
def __init__(self, addr=PL_ADDR, channels=None, chan_offset=768):
'''
Constructor for plexon.Aux
Parameters
----------
addr : tuple of length 2
(IP address, UDP port)
channels : optional, default = None
list of channels (electrodes) from which to receive spike data
chan_offset : int, optional, default=768
Indexing offset from the first Aux channel to the indexing system used by the OPX system
Returns
-------
plexon.Aux instance
'''
self.conn = plexnet.Connection(*addr)
self.conn.connect(256, waveforms=False, analog=True)
# for OPX system, the 32 auxiliary input (AI) channels are numbered 769-800
self.chan_offset = chan_offset
channels_offset = [c + self.chan_offset for c in channels]
try:
self.conn.select_continuous(channels_offset)
except:
print("Cannot run select_continuous method")
def start(self):
self.conn.start_data()
self.data = self.conn.get_data()
def stop(self):
self.conn.stop_data()
def get(self):
d = next(self.data)
while d.type != PL_ADDataType:
d = next(self.data)
        # values arrive as signed 12-bit integers in the range [-2048, 2047]
# first convert to float
waveform = | np.array(d.waveform, dtype='float') | numpy.array |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnLSTMtanhMakespan0=[799, 798, 799, 799, 805, 806, 799, 805, 805, 800, 798, 798]
drnnLSTMtanhMakespan1=[800, 798, 796, 800, 796, 794, 795, 798, 800, 798, 805, 798]
drnnLSTMtanhMakespan2=[796, 800, 798, 804, 800, 798, 798, 798, 800, 800, 802, 797]
drnnLSTMtanhMakespan3=[805, 800, 800, 803, 794, 802, 800, 798, 799, 804, 799, 806]
drnnLSTMtanhMakespan4=[796, 798, 795, 798, 796, 799, 800, 796, 796, 798, 806, 800]
drnnLSTMtanhMakespan5=[798, 798, 799, 800, 800, 808, 798, 798, 801, 796, 799, 798]
drnnLSTMtanhMakespan6=[800, 796, 805, 798, 798, 796, 799, 800, 803, 800, 798, 800]
drnnLSTMtanhMakespan7=[799, 805, 802, 805, 800, 799, 800, 799, 805, 800, 794, 796]
drnnLSTMtanhMakespan8=[799, 798, 800, 798, 798, 800, 800, 800, 804, 799, 800, 804]
drnnLSTMtanhMakespan9=[795, 800, 795, 796, 798, 796, 797, 800, 797, 798, 796, 795]
drnnLSTMtanhMakespan10=[804, 799, 805, 798, 798, 798, 805, 800, 796, 804, 796, 799]
drnnLSTMtanhMakespan11=[795, 803, 805, 798, 795, 801, 798, 798, 804, 803, 799, 804]
drnnLSTMtanhMakespan12=[798, 798, 799, 800, 798, 798, 799, 799, 801, 796, 799, 798]
drnnLSTMtanhMakespan13=[798, 798, 799, 797, 796, 796, 800, 797, 805, 800, 800, 794]
drnnLSTMtanhMakespan14=[800, 798, 798, 796, 800, 800, 798, 798, 802, 798, 802, 798]
drnnLSTMtanhMakespan15=[796, 796, 800, 801, 800, 800, 796, 794, 796, 800, 796, 798]
drnnLSTMtanhMakespan16=[798, 798, 795, 797, 795, 799, 800, 796, 795, 796, 800, 800]
drnnLSTMtanhMakespan17=[794, 795, 800, 798, 795, 796, 798, 796, 795, 794, 798, 796]
drnnLSTMtanhMakespan18=[797, 795, 794, 794, 800, 796, 796, 795, 798, 795, 798, 794]
drnnLSTMtanhMakespan19=[797, 795, 795, 796, 798, 799, 795, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan20=[796, 794, 798, 797, 798, 799, 795, 795, 797, 795, 795, 792]
drnnLSTMtanhMakespan21=[797, 795, 797, 793, 794, 794, 800, 794, 798, 795, 797, 795]
drnnLSTMtanhMakespan22=[794, 800, 798, 795, 795, 796, 796, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan23=[795, 795, 794, 795, 794, 794, 797, 799, 796, 794, 794, 795]
drnnLSTMtanhMakespan24=[798, 795, 795, 795, 792, 794, 795, 794, 794, 795, 795, 795]
drnnLSTMtanhMakespan25=[794, 792, 794, 795, 795, 794, 794, 794, 794, 795, 794, 793]
drnnLSTMtanhMakespan26=[794, 794, 795, 796, 798, 795, 794, 794, 794, 794, 795, 794]
drnnLSTMtanhMakespan27=[795, 794, 795, 795, 795, 794, 794, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan28=[795, 794, 794, 795, 794, 795, 795, 795, 795, 794, 795, 794]
drnnLSTMtanhMakespan29=[792, 794, 795, 794, 794, 795, 794, 793, 795, 794, 795, 792]
drnnLSTMtanhMakespan30=[795, 794, 795, 795, 794, 794, 794, 795, 794, 794, 794, 794]
drnnLSTMtanhMakespan31=[794, 794, 795, 794, 795, 793, 795, 795, 795, 792, 794, 794]
drnnLSTMtanhMakespan32=[795, 795, 794, 793, 795, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMtanhMakespan33=[793, 794, 795, 793, 792, 795, 794, 794, 794, 794, 794, 795]
drnnLSTMtanhMakespan34=[794, 795, 795, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drnnLSTMtanhMakespan35=[794, 794, 797, 793, 792, 794, 793, 794, 795, 794, 795, 792]
drnnLSTMtanhMakespan36=[794, 794, 793, 794, 795, 797, 795, 795, 794, 795, 793, 794]
drnnLSTMtanhMakespan37=[795, 793, 795, 794, 795, 798, 795, 794, 795, 793, 795, 794]
drnnLSTMtanhMakespan38=[794, 795, 793, 795, 794, 794, 794, 794, 794, 794, 797, 795]
drnnLSTMtanhMakespan39=[794, 794, 795, 794, 795, 795, 794, 795, 794, 795, 798, 797]
drnnLSTMtanhMakespan40=[795, 795, 794, 795, 794, 795, 795, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan41=[794, 795, 792, 794, 794, 798, 795, 794, 794, 794, 793, 795]
drnnLSTMtanhMakespan42=[793, 795, 794, 793, 794, 794, 792, 794, 795, 794, 794, 793]
drnnLSTMtanhMakespan43=[793, 792, 793, 794, 794, 795, 792, 794, 795, 794, 795, 794]
drnnLSTMtanhMakespan44=[793, 794, 795, 795, 794, 794, 795, 798, 794, 792, 795, 794]
drnnLSTMtanhMakespan45=[795, 794, 794, 794, 794, 792, 794, 795, 794, 796, 795, 794]
drnnLSTMtanhMakespan46=[794, 793, 793, 795, 795, 794, 794, 794, 794, 796, 794, 794]
drnnLSTMtanhMakespan47=[794, 794, 795, 794, 794, 795, 792, 795, 794, 795, 795, 794]
drnnLSTMtanhMakespan48=[794, 795, 794, 794, 794, 792, 794, 795, 796, 794, 794, 795]
drnnLSTMtanhMakespan49=[794, 794, 794, 794, 794, 794, 792, 794, 793, 794, 795, 794]
drnnLSTMtanhRewards0=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17725973169122497, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards1=[-0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards2=[-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17653532907770195, -0.17562802996914942]
drnnLSTMtanhRewards3=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17671654929577466, -0.17508269018743108, -0.17653532907770195, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.1768976897689769, -0.1759911894273128, -0.17725973169122497]
drnnLSTMtanhRewards4=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.17617264919621228]
drnnLSTMtanhRewards5=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards6=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMtanhRewards7=[-0.1759911894273128, -0.177078750549934, -0.17653532907770195, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drnnLSTMtanhRewards8=[-0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769]
drnnLSTMtanhRewards9=[-0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drnnLSTMtanhRewards10=[-0.1768976897689769, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128]
drnnLSTMtanhRewards11=[-0.17526455026455026, -0.17671654929577466, -0.177078750549934, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.1759911894273128, -0.1768976897689769]
drnnLSTMtanhRewards12=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108]
drnnLSTMtanhRewards14=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765]
drnnLSTMtanhRewards15=[-0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnLSTMtanhRewards16=[-0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228]
drnnLSTMtanhRewards17=[-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drnnLSTMtanhRewards18=[-0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drnnLSTMtanhRewards19=[-0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards20=[-0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards21=[-0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards22=[-0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards23=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards24=[-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards25=[-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards26=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards27=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards28=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards29=[-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards31=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards32=[-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards33=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards34=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards35=[-0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards36=[-0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108]
drnnLSTMtanhRewards37=[-0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards38=[-0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards39=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drnnLSTMtanhRewards40=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards41=[-0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drnnLSTMtanhRewards42=[-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards43=[-0.1749007498897221, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards44=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards45=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards46=[-0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards48=[-0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards49=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
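# Helper sketch (an addition, not part of the original script): gather the
# per-iteration makespan lists defined above into a 2-D array and return the mean
# makespan per iteration.  Assumes the drnn*Makespan0..49 naming convention used here.
def _mean_makespan_curve(prefix="drnnLSTMtanhMakespan", n_iters=50):
    data = np.array([globals()[prefix + str(i)] for i in range(n_iters)])
    return data.mean(axis=1)  # average over the 12 episodes of each iteration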
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnLSTMreluMakespan0=[805, 800, 800, 800, 794, 800, 798, 809, 795, 800, 798, 798]
drnnLSTMreluMakespan1=[798, 798, 796, 799, 800, 796, 796, 798, 798, 794, 798, 800]
drnnLSTMreluMakespan2=[805, 805, 798, 799, 806, 799, 806, 799, 800, 798, 805, 795]
drnnLSTMreluMakespan3=[800, 800, 800, 796, 800, 800, 799, 806, 808, 798, 797, 798]
drnnLSTMreluMakespan4=[805, 805, 795, 796, 799, 804, 798, 794, 798, 794, 796, 810]
drnnLSTMreluMakespan5=[798, 798, 798, 795, 800, 798, 796, 802, 800, 800, 805, 801]
drnnLSTMreluMakespan6=[800, 798, 798, 795, 800, 796, 800, 798, 799, 796, 805, 800]
drnnLSTMreluMakespan7=[800, 800, 800, 799, 798, 798, 800, 805, 800, 799, 800, 801]
drnnLSTMreluMakespan8=[799, 800, 800, 799, 795, 795, 805, 795, 798, 800, 798, 800]
drnnLSTMreluMakespan9=[800, 796, 805, 798, 798, 795, 805, 800, 799, 795, 800, 805]
drnnLSTMreluMakespan10=[805, 798, 805, 800, 801, 805, 799, 805, 798, 800, 800, 798]
drnnLSTMreluMakespan11=[798, 803, 800, 797, 795, 796, 794, 799, 800, 800, 800, 796]
drnnLSTMreluMakespan12=[799, 798, 799, 795, 798, 795, 798, 798, 798, 795, 798, 798]
drnnLSTMreluMakespan13=[798, 798, 799, 796, 798, 796, 800, 799, 796, 794, 796, 795]
drnnLSTMreluMakespan14=[796, 798, 806, 799, 804, 798, 805, 798, 800, 805, 794, 800]
drnnLSTMreluMakespan15=[806, 795, 800, 796, 798, 796, 810, 798, 799, 798, 800, 800]
drnnLSTMreluMakespan16=[799, 796, 798, 798, 798, 800, 798, 810, 796, 805, 800, 795]
drnnLSTMreluMakespan17=[798, 798, 798, 794, 798, 805, 801, 798, 800, 799, 798, 798]
drnnLSTMreluMakespan18=[795, 800, 794, 798, 797, 798, 794, 800, 797, 796, 794, 794]
drnnLSTMreluMakespan19=[798, 802, 794, 798, 799, 795, 797, 795, 800, 796, 797, 796]
drnnLSTMreluMakespan20=[794, 797, 795, 794, 799, 795, 795, 795, 800, 797, 794, 798]
drnnLSTMreluMakespan21=[799, 798, 796, 795, 794, 798, 795, 795, 798, 798, 795, 794]
drnnLSTMreluMakespan22=[794, 794, 795, 797, 795, 795, 795, 792, 794, 795, 794, 794]
drnnLSTMreluMakespan23=[794, 794, 794, 794, 795, 796, 793, 794, 795, 794, 797, 795]
drnnLSTMreluMakespan24=[794, 792, 792, 794, 796, 792, 794, 795, 794, 792, 796, 795]
drnnLSTMreluMakespan25=[794, 795, 795, 794, 794, 792, 795, 792, 795, 794, 794, 794]
drnnLSTMreluMakespan26=[795, 794, 794, 795, 794, 794, 793, 794, 797, 795, 794, 795]
drnnLSTMreluMakespan27=[794, 794, 795, 796, 795, 797, 794, 794, 795, 801, 794, 795]
drnnLSTMreluMakespan28=[795, 795, 795, 795, 794, 792, 794, 797, 794, 795, 795, 795]
drnnLSTMreluMakespan29=[794, 792, 798, 794, 797, 795, 793, 795, 795, 794, 795, 795]
drnnLSTMreluMakespan30=[795, 794, 798, 794, 794, 795, 792, 796, 794, 796, 794, 794]
drnnLSTMreluMakespan31=[794, 795, 795, 794, 795, 794, 795, 795, 794, 794, 795, 795]
drnnLSTMreluMakespan32=[798, 794, 794, 794, 798, 792, 795, 795, 795, 796, 794, 795]
drnnLSTMreluMakespan33=[794, 796, 794, 794, 794, 795, 794, 794, 797, 793, 793, 795]
drnnLSTMreluMakespan34=[794, 794, 795, 794, 794, 793, 794, 795, 793, 795, 795, 794]
drnnLSTMreluMakespan35=[798, 796, 795, 794, 795, 795, 795, 795, 794, 795, 797, 795]
drnnLSTMreluMakespan36=[794, 796, 794, 794, 794, 794, 795, 795, 797, 796, 795, 795]
drnnLSTMreluMakespan37=[795, 794, 796, 795, 795, 795, 795, 794, 792, 797, 794, 793]
drnnLSTMreluMakespan38=[794, 798, 794, 792, 794, 792, 795, 797, 793, 794, 794, 797]
drnnLSTMreluMakespan39=[792, 794, 794, 794, 792, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMreluMakespan40=[792, 795, 795, 792, 795, 795, 794, 795, 794, 795, 794, 795]
drnnLSTMreluMakespan41=[794, 797, 795, 794, 795, 795, 798, 794, 795, 796, 796, 794]
drnnLSTMreluMakespan42=[794, 795, 795, 795, 794, 795, 795, 794, 794, 795, 793, 795]
drnnLSTMreluMakespan43=[795, 794, 795, 794, 795, 795, 792, 794, 794, 795, 794, 795]
drnnLSTMreluMakespan44=[795, 794, 792, 795, 794, 794, 795, 794, 796, 795, 796, 794]
drnnLSTMreluMakespan45=[795, 794, 793, 794, 793, 795, 794, 794, 795, 794, 795, 794]
drnnLSTMreluMakespan46=[794, 796, 793, 794, 794, 795, 799, 795, 794, 794, 794, 794]
drnnLSTMreluMakespan47=[794, 794, 794, 794, 795, 793, 795, 795, 794, 795, 795, 795]
drnnLSTMreluMakespan48=[794, 794, 795, 794, 795, 795, 795, 794, 794, 795, 795, 794]
drnnLSTMreluMakespan49=[795, 795, 795, 794, 795, 795, 794, 795, 793, 793, 792, 792]
drnnLSTMreluRewards0=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.1778021978021978, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards1=[-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards2=[-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17526455026455026]
drnnLSTMreluRewards3=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnLSTMreluRewards4=[-0.177078750549934, -0.177078750549934, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17798286090969018]
drnnLSTMreluRewards5=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17653532907770195, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.1763540290620872]
drnnLSTMreluRewards6=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.17617264919621228]
drnnLSTMreluRewards7=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872]
drnnLSTMreluRewards8=[-0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards9=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17617264919621228, -0.177078750549934]
drnnLSTMreluRewards10=[-0.177078750549934, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drnnLSTMreluRewards11=[-0.17580964970257765, -0.17671654929577466, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387]
drnnLSTMreluRewards12=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards14=[-0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228]
drnnLSTMreluRewards15=[-0.17725973169122497, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17798286090969018, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnLSTMreluRewards16=[-0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17544633017412387, -0.177078750549934, -0.17617264919621228, -0.17526455026455026]
drnnLSTMreluRewards17=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards18=[-0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards19=[-0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387]
drnnLSTMreluRewards20=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765]
drnnLSTMreluRewards21=[-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards22=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards23=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards24=[-0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17544633017412387, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards25=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards26=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards27=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards28=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards29=[-0.17508269018743108, -0.17471872931833224, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards31=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards32=[-0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards33=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards34=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards35=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards36=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards37=[-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17562802996914942, -0.17508269018743108, -0.1749007498897221]
drnnLSTMreluRewards38=[-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
drnnLSTMreluRewards39=[-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards40=[-0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards41=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards42=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards43=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards44=[-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards45=[-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards46=[-0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards48=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards49=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.1749007498897221, -0.17471872931833224, -0.17471872931833224]
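# Note: every reward list in this file appears to be a deterministic transform of the matching
# makespan list; the values shown are consistent with reward = -makespan / (makespan + 3741).
# The constant 3741 is inferred from the numbers themselves and is not stated in the source,
# so the helper below is a hypothesis for reproducing the reward columns, not the original
# reward definition used in the experiments.
def makespans_to_rewards(makespans, offset=3741):
    """Map a list of makespans to rewards under the inferred relation above (assumed, not confirmed)."""
    return [-m / (m + offset) for m in makespans]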
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
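# A minimal sketch, assuming a tf.keras implementation, of the agent network described in the
# comment above (1 GRU layer followed by 4 Dense layers). The hidden width, input shape and
# output size are illustrative assumptions; only the layer types and the activation function
# come from the comment. The makespan/reward lists recorded for these runs follow below.
def build_gru_drnn(activation="tanh", num_actions=12, seq_len=12, num_features=8, hidden_units=64):
    """Illustrative '1 GRU + 4 Dense' network; all sizes are placeholder assumptions."""
    import tensorflow as tf  # imported lazily so this data module does not require TensorFlow

    return tf.keras.Sequential([
        tf.keras.layers.Input(shape=(seq_len, num_features)),
        tf.keras.layers.GRU(hidden_units, activation=activation),
        tf.keras.layers.Dense(hidden_units, activation=activation),
        tf.keras.layers.Dense(hidden_units, activation=activation),
        tf.keras.layers.Dense(hidden_units, activation=activation),
        # fourth Dense layer: one output per action (output size assumed)
        tf.keras.layers.Dense(num_actions),
    ])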
drnnGRUtanhMakespan0 = [798, 799, 798, 804, 805, 799, 801, 801, 801, 799, 798, 796]
drnnGRUtanhMakespan1 = [800, 798, 798, 798, 798, 798, 801, 798, 795, 796, 800, 796]
drnnGRUtanhMakespan2 = [795, 804, 805, 800, 800, 796, 804, 800, 795, 798, 798, 801]
drnnGRUtanhMakespan3 = [806, 796, 794, 797, 798, 800, 800, 808, 805, 798, 800, 809]
drnnGRUtanhMakespan4 = [805, 801, 795, 798, 798, 800, 796, 796, 805, 798, 799, 798]
drnnGRUtanhMakespan5 = [804, 799, 798, 804, 796, 799, 798, 805, 796, 805, 798, 800]
drnnGRUtanhMakespan6 = [800, 799, 794, 801, 799, 796, 800, 804, 797, 796, 800, 798]
drnnGRUtanhMakespan7 = [798, 800, 810, 810, 805, 800, 795, 798, 800, 805, 799, 800]
drnnGRUtanhMakespan8 = [798, 797, 800, 800, 804, 805, 798, 798, 801, 795, 798, 809]
drnnGRUtanhMakespan9 = [803, 800, 800, 805, 805, 798, 804, 803, 805, 801, 810, 801]
drnnGRUtanhMakespan10 = [798, 799, 798, 798, 805, 804, 805, 798, 799, 798, 800, 800]
drnnGRUtanhMakespan11 = [796, 795, 805, 800, 800, 798, 795, 804, 805, 798, 800, 800]
drnnGRUtanhMakespan12 = [799, 799, 809, 800, 799, 799, 797, 805, 799, 800, 798, 795]
drnnGRUtanhMakespan13 = [805, 800, 800, 805, 800, 799, 798, 801, 798, 797, 805, 800]
drnnGRUtanhMakespan14 = [800, 798, 800, 800, 800, 804, 804, 799, 799, 800, 798, 798]
drnnGRUtanhMakespan15 = [805, 800, 795, 800, 804, 795, 800, 798, 799, 798, 800, 796]
drnnGRUtanhMakespan16 = [806, 795, 801, 799, 799, 796, 796, 794, 802, 796, 800, 802]
drnnGRUtanhMakespan17 = [796, 800, 798, 800, 794, 800, 804, 805, 798, 810, 800, 798]
drnnGRUtanhMakespan18 = [798, 800, 794, 794, 797, 798, 800, 805, 798, 798, 804, 798]
drnnGRUtanhMakespan19 = [796, 800, 806, 799, 796, 800, 798, 805, 798, 799, 797, 805]
drnnGRUtanhMakespan20 = [805, 800, 799, 796, 805, 805, 805, 794, 809, 796, 800, 797]
drnnGRUtanhMakespan21 = [798, 800, 800, 800, 798, 801, 796, 801, 801, 801, 795, 799]
drnnGRUtanhMakespan22 = [798, 801, 797, 800, 799, 795, 799, 799, 800, 801, 800, 799]
drnnGRUtanhMakespan23 = [800, 798, 799, 805, 794, 800, 798, 796, 796, 804, 800, 794]
drnnGRUtanhMakespan24 = [800, 800, 798, 805, 804, 799, 798, 801, 800, 798, 798, 798]
drnnGRUtanhMakespan25 = [798, 798, 798, 795, 800, 803, 798, 798, 800, 799, 796, 798]
drnnGRUtanhMakespan26 = [796, 798, 798, 798, 805, 796, 798, 798, 805, 795, 801, 796]
drnnGRUtanhMakespan27 = [794, 796, 796, 800, 800, 798, 800, 798, 802, 798, 797, 798]
drnnGRUtanhMakespan28 = [799, 799, 800, 800, 798, 802, 799, 798, 795, 795, 794, 798]
drnnGRUtanhMakespan29 = [798, 796, 796, 797, 796, 798, 800, 800, 796, 798, 800, 795]
drnnGRUtanhMakespan30 = [799, 798, 795, 795, 800, 795, 798, 798, 799, 798, 805, 799]
drnnGRUtanhMakespan31 = [795, 799, 794, 794, 796, 795, 795, 794, 798, 797, 798, 795]
drnnGRUtanhMakespan32 = [797, 798, 795, 796, 798, 795, 797, 798, 795, 794, 795, 796]
drnnGRUtanhMakespan33 = [799, 795, 794, 794, 798, 795, 798, 797, 800, 796, 795, 794]
drnnGRUtanhMakespan34 = [798, 795, 798, 796, 798, 794, 796, 798, 798, 798, 796, 797]
drnnGRUtanhMakespan35 = [795, 798, 796, 798, 794, 801, 795, 800, 795, 800, 794, 800]
drnnGRUtanhMakespan36 = [798, 799, 796, 797, 795, 794, 800, 795, 795, 794, 795, 795]
drnnGRUtanhMakespan37 = [799, 798, 795, 795, 794, 795, 795, 796, 805, 795, 798, 796]
drnnGRUtanhMakespan38 = [798, 794, 795, 795, 795, 796, 795, 796, 800, 798, 797, 796]
drnnGRUtanhMakespan39 = [794, 795, 795, 797, 795, 795, 794, 794, 798, 795, 794, 798]
drnnGRUtanhMakespan40 = [795, 795, 795, 795, 795, 795, 794, 794, 793, 797, 794, 795]
drnnGRUtanhMakespan41 = [794, 794, 795, 793, 795, 795, 792, 794, 795, 794, 794, 794]
drnnGRUtanhMakespan42 = [795, 795, 795, 796, 794, 797, 795, 795, 792, 795, 796, 793]
drnnGRUtanhMakespan43 = [794, 795, 795, 794, 795, 794, 798, 794, 797, 795, 794, 794]
drnnGRUtanhMakespan44 = [795, 795, 793, 794, 795, 794, 795, 795, 794, 794, 795, 794]
drnnGRUtanhMakespan45 = [794, 794, 794, 794, 794, 794, 795, 794, 794, 794, 796, 795]
drnnGRUtanhMakespan46 = [795, 794, 795, 794, 794, 794, 793, 794, 795, 795, 794, 797]
drnnGRUtanhMakespan47 = [794, 794, 794, 794, 795, 794, 795, 792, 794, 795, 794, 794]
drnnGRUtanhMakespan48 = [795, 794, 794, 794, 795, 798, 794, 794, 794, 795, 794, 794]
drnnGRUtanhMakespan49 = [795, 795, 794, 795, 793, 795, 796, 794, 795, 794, 794, 797]
drnnGRUtanhRewards0 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards1 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards2 = [-0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872]
drnnGRUtanhRewards3 = [-0.17725973169122497, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1778021978021978]
drnnGRUtanhRewards4 = [-0.177078750549934, -0.1763540290620872, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUtanhRewards5 = [-0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUtanhRewards6 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.1763540290620872, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards7 = [-0.17580964970257765, -0.17617264919621228, -0.17798286090969018, -0.177078750549934, -0.17798286090969018, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUtanhRewards8 = [-0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.1778021978021978]
drnnGRUtanhRewards9 = [-0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.177078750549934, -0.1763540290620872, -0.17798286090969018, -0.1763540290620872]
drnnGRUtanhRewards10 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards11 = [-0.17544633017412387, -0.17526455026455026, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards12 = [-0.1759911894273128, -0.1759911894273128, -0.1778021978021978, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17562802996914942, -0.177078750549934, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards13 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17617264919621228]
drnnGRUtanhRewards14 = [-0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1768976897689769, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards15 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.1768976897689769, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards16 = [-0.17725973169122497, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108, -0.17653532907770195, -0.17544633017412387, -0.17617264919621228, -0.17653532907770195]
drnnGRUtanhRewards17 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17798286090969018, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards18 = [-0.17580964970257765, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17580964970257765]
drnnGRUtanhRewards19 = [-0.17544633017412387, -0.17617264919621228, -0.17725973169122497, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17562802996914942, -0.1759911894273128, -0.177078750549934]
drnnGRUtanhRewards20 = [-0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.177078750549934, -0.177078750549934, -0.17508269018743108, -0.1778021978021978, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942]
drnnGRUtanhRewards21 = [-0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUtanhRewards22 = [-0.17580964970257765, -0.1763540290620872, -0.17562802996914942, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.1759911894273128]
drnnGRUtanhRewards23 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108]
drnnGRUtanhRewards24 = [-0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.1768976897689769, -0.17580964970257765, -0.1763540290620872, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards25 = [-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUtanhRewards26 = [-0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387]
drnnGRUtanhRewards27 = [-0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnGRUtanhRewards28 = [-0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drnnGRUtanhRewards29 = [-0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026]
drnnGRUtanhRewards30 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1759911894273128]
drnnGRUtanhRewards31 = [-0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards32 = [-0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards33 = [-0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards34 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942]
drnnGRUtanhRewards35 = [-0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drnnGRUtanhRewards36 = [-0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnGRUtanhRewards37 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards38 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387]
drnnGRUtanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765]
drnnGRUtanhRewards40 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026]
drnnGRUtanhRewards41 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards42 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221]
drnnGRUtanhRewards43 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards44 = [-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards45 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards46 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942]
drnnGRUtanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards49 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
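# The relu runs recorded below use the same '1 GRU + 4 Dense' topology as the tanh runs above;
# under the sketch above this would correspond to build_gru_drnn(activation="relu"), again as an
# illustrative assumption rather than the original implementation.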
drnnGRUreluMakespan0 = [800, 799, 798, 797, 798, 800, 800, 796, 800, 794, 800, 800]
drnnGRUreluMakespan1 = [798, 800, 805, 795, 799, 808, 795, 800, 796, 798, 799, 798]
drnnGRUreluMakespan2 = [799, 800, 806, 800, 800, 805, 805, 798, 799, 807, 800, 800]
drnnGRUreluMakespan3 = [798, 795, 799, 800, 800, 796, 798, 800, 800, 804, 805, 800]
drnnGRUreluMakespan4 = [811, 800, 799, 800, 805, 798, 798, 799, 796, 804, 805, 804]
drnnGRUreluMakespan5 = [799, 795, 797, 800, 798, 800, 800, 798, 800, 797, 800, 798]
drnnGRUreluMakespan6 = [798, 800, 798, 799, 797, 798, 800, 796, 801, 799, 795, 798]
drnnGRUreluMakespan7 = [800, 804, 795, 801, 796, 806, 805, 798, 800, 799, 799, 804]
drnnGRUreluMakespan8 = [800, 799, 799, 800, 805, 796, 800, 800, 810, 796, 800, 798]
drnnGRUreluMakespan9 = [794, 800, 799, 805, 800, 800, 798, 798, 796, 795, 798, 796]
drnnGRUreluMakespan10 = [798, 800, 798, 801, 795, 802, 796, 809, 800, 800, 798, 795]
drnnGRUreluMakespan11 = [804, 800, 799, 799, 798, 803, 798, 798, 805, 803, 800, 796]
drnnGRUreluMakespan12 = [800, 799, 805, 797, 798, 796, 799, 794, 799, 805, 799, 800]
drnnGRUreluMakespan13 = [796, 800, 798, 800, 795, 799, 800, 804, 800, 794, 805, 805]
drnnGRUreluMakespan14 = [800, 795, 796, 798, 798, 801, 805, 794, 800, 801, 801, 796]
drnnGRUreluMakespan15 = [798, 800, 796, 796, 798, 794, 797, 800, 796, 801, 795, 799]
drnnGRUreluMakespan16 = [800, 805, 794, 800, 799, 800, 805, 801, 798, 800, 801, 799]
drnnGRUreluMakespan17 = [797, 803, 801, 808, 794, 799, 799, 800, 805, 796, 801, 796]
drnnGRUreluMakespan18 = [805, 800, 800, 804, 799, 798, 800, 799, 804, 796, 800, 804]
drnnGRUreluMakespan19 = [804, 798, 800, 799, 799, 799, 805, 795, 801, 799, 799, 805]
drnnGRUreluMakespan20 = [799, 804, 796, 798, 796, 798, 800, 805, 799, 810, 800, 800]
drnnGRUreluMakespan21 = [798, 799, 799, 805, 798, 798, 805, 798, 794, 799, 798, 798]
drnnGRUreluMakespan22 = [799, 798, 798, 796, 798, 805, 799, 798, 798, 799, 796, 798]
drnnGRUreluMakespan23 = [798, 805, 808, 798, 798, 805, 810, 796, 804, 799, 800, 799]
drnnGRUreluMakespan24 = [798, 796, 798, 795, 800, 798, 799, 798, 797, 805, 798, 800]
drnnGRUreluMakespan25 = [799, 796, 799, 798, 805, 798, 798, 800, 796, 794, 810, 798]
drnnGRUreluMakespan26 = [799, 798, 805, 800, 802, 798, 799, 799, 799, 794, 802, 797]
drnnGRUreluMakespan27 = [798, 800, 805, 796, 798, 795, 802, 796, 798, 800, 798, 794]
drnnGRUreluMakespan28 = [796, 805, 798, 800, 800, 798, 810, 798, 798, 798, 796, 796]
drnnGRUreluMakespan29 = [800, 798, 798, 802, 794, 798, 796, 808, 800, 800, 798, 799]
drnnGRUreluMakespan30 = [798, 796, 798, 798, 794, 798, 794, 800, 796, 794, 800, 800]
drnnGRUreluMakespan31 = [794, 802, 797, 799, 798, 800, 799, 799, 796, 796, 798, 798]
drnnGRUreluMakespan32 = [799, 798, 794, 795, 798, 805, 804, 797, 795, 800, 796, 798]
drnnGRUreluMakespan33 = [803, 799, 805, 796, 794, 798, 797, 798, 798, 794, 794, 798]
drnnGRUreluMakespan34 = [810, 796, 795, 798, 799, 798, 796, 795, 795, 797, 798, 798]
drnnGRUreluMakespan35 = [799, 799, 799, 799, 795, 798, 795, 800, 796, 795, 795, 796]
drnnGRUreluMakespan36 = [795, 797, 798, 799, 799, 799, 800, 794, 796, 795, 798, 800]
drnnGRUreluMakespan37 = [800, 798, 799, 794, 800, 796, 798, 798, 797, 800, 794, 798]
drnnGRUreluMakespan38 = [800, 799, 794, 796, 795, 800, 796, 804, 800, 795, 800, 798]
drnnGRUreluMakespan39 = [794, 798, 795, 804, 805, 799, 798, 800, 796, 798, 795, 794]
drnnGRUreluMakespan40 = [799, 798, 796, 798, 798, 799, 800, 796, 798, 798, 799, 798]
drnnGRUreluMakespan41 = [796, 798, 800, 797, 799, 796, 797, 796, 799, 804, 805, 798]
drnnGRUreluMakespan42 = [798, 794, 795, 799, 799, 798, 797, 798, 798, 798, 798, 795]
drnnGRUreluMakespan43 = [799, 798, 794, 794, 795, 794, 795, 799, 799, 800, 799, 794]
drnnGRUreluMakespan44 = [795, 796, 795, 799, 794, 795, 794, 796, 795, 794, 795, 796]
drnnGRUreluMakespan45 = [794, 797, 794, 795, 796, 795, 794, 799, 795, 794, 798, 798]
drnnGRUreluMakespan46 = [795, 795, 794, 795, 794, 794, 792, 794, 795, 797, 794, 794]
drnnGRUreluMakespan47 = [798, 796, 797, 798, 794, 798, 794, 797, 794, 803, 798, 798]
drnnGRUreluMakespan48 = [795, 794, 796, 798, 795, 794, 796, 795, 796, 794, 796, 796]
drnnGRUreluMakespan49 = [798, 798, 796, 798, 798, 796, 796, 798, 798, 798, 796, 798]
drnnGRUreluRewards0 = [-0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards1 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17526455026455026, -0.1759911894273128, -0.1776214552648934, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards2 = [-0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.1774406332453826, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards3 = [-0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17617264919621228]
drnnGRUreluRewards4 = [-0.1781634446397188, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.1768976897689769, -0.177078750549934, -0.1768976897689769]
drnnGRUreluRewards5 = [-0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards6 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765]
drnnGRUreluRewards7 = [-0.17617264919621228, -0.1768976897689769, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387, -0.17725973169122497, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1768976897689769]
drnnGRUreluRewards8 = [-0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17798286090969018, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards9 = [-0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUreluRewards10 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.1778021978021978, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards11 = [-0.1768976897689769, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17671654929577466, -0.17617264919621228, -0.17544633017412387]
drnnGRUreluRewards12 = [-0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.1759911894273128, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUreluRewards13 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108, -0.177078750549934, -0.177078750549934]
drnnGRUreluRewards14 = [-0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1763540290620872, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards15 = [-0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUreluRewards16 = [-0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128]
drnnGRUreluRewards17 = [-0.17562802996914942, -0.17671654929577466, -0.1763540290620872, -0.1776214552648934, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards18 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769]
drnnGRUreluRewards19 = [-0.1768976897689769, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.177078750549934]
drnnGRUreluRewards20 = [-0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards21 = [-0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards22 = [-0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards23 = [-0.17580964970257765, -0.177078750549934, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17798286090969018, -0.17544633017412387, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128]
drnnGRUreluRewards24 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards25 = [-0.1759911894273128, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17798286090969018, -0.17580964970257765]
drnnGRUreluRewards26 = [-0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17508269018743108, -0.17653532907770195, -0.17562802996914942]
drnnGRUreluRewards27 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17508269018743108]
drnnGRUreluRewards28 = [-0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards29 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.1776214552648934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128]
drnnGRUreluRewards30 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards31 = [-0.17508269018743108, -0.17653532907770195, -0.17562802996914942, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards32 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards33 = [-0.17671654929577466, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards34 = [-0.17798286090969018, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards35 = [-0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards36 = [-0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards37 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards38 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards39 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnGRUreluRewards40 = [-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards41 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.177078750549934, -0.17580964970257765]
drnnGRUreluRewards42 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards43 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.17508269018743108]
drnnGRUreluRewards44 = [-0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards45 = [-0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards46 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108]
drnnGRUreluRewards47 = [-0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards49 = [-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765]
# Deep Reinforcement Learning: 5 Dense layers, tanh activation function, 12 episodes, 50 iterations
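# A minimal sketch, assuming a tf.keras implementation, of the feed-forward agent described in the
# comment above (5 Dense layers, tanh activation). All sizes are illustrative assumptions; only the
# layer count and activation come from the comment. The recorded makespans and rewards follow below.
def build_drl_dense(activation="tanh", num_actions=12, num_features=8, hidden_units=64):
    """Illustrative '5 Dense layers' network; all sizes are placeholder assumptions."""
    import tensorflow as tf  # lazy import, see build_gru_drnn above

    return tf.keras.Sequential([
        tf.keras.layers.Input(shape=(num_features,)),
        tf.keras.layers.Dense(hidden_units, activation=activation),
        tf.keras.layers.Dense(hidden_units, activation=activation),
        tf.keras.layers.Dense(hidden_units, activation=activation),
        tf.keras.layers.Dense(hidden_units, activation=activation),
        # fifth Dense layer: one output per action (output size assumed)
        tf.keras.layers.Dense(num_actions),
    ])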
drlTanhMakespan0 = [794, 794, 805, 799, 810, 800, 794, 810, 804, 806, 812, 808]
drlTanhMakespan1 = [796, 795, 795, 798, 799, 800, 800, 795, 797, 796, 797, 799]
drlTanhMakespan2 = [800, 797, 798, 801, 799, 800, 796, 795, 797, 796, 794, 798]
drlTanhMakespan3 = [800, 795, 799, 796, 799, 798, 795, 799, 795, 799, 798, 796]
drlTanhMakespan4 = [809, 795, 795, 800, 797, 795, 798, 798, 799, 799, 798, 798]
drlTanhMakespan5 = [795, 795, 795, 799, 795, 798, 795, 800, 795, 796, 795, 805]
drlTanhMakespan6 = [794, 800, 795, 793, 798, 795, 794, 798, 795, 799, 795, 796]
drlTanhMakespan7 = [795, 795, 795, 795, 798, 795, 797, 797, 795, 795, 798, 797]
drlTanhMakespan8 = [795, 795, 795, 794, 800, 800, 794, 795, 794, 794, 797, 795]
drlTanhMakespan9 = [793, 794, 796, 795, 796, 800, 794, 797, 793, 795, 798, 795]
drlTanhMakespan10 = [795, 795, 797, 794, 795, 798, 797, 795, 798, 794, 794, 794]
drlTanhMakespan11 = [795, 795, 795, 795, 797, 795, 795, 794, 795, 795, 795, 794]
drlTanhMakespan12 = [794, 798, 795, 794, 795, 795, 795, 797, 799, 795, 795, 795]
drlTanhMakespan13 = [795, 797, 795, 800, 796, 795, 796, 795, 795, 795, 798, 794]
drlTanhMakespan14 = [795, 795, 796, 794, 794, 794, 797, 795, 798, 795, 795, 793]
drlTanhMakespan15 = [799, 794, 795, 795, 795, 796, 801, 797, 795, 794, 795, 799]
drlTanhMakespan16 = [795, 795, 796, 798, 795, 795, 795, 795, 795, 798, 798, 796]
drlTanhMakespan17 = [800, 798, 795, 795, 798, 794, 795, 795, 797, 795, 796, 794]
drlTanhMakespan18 = [797, 800, 798, 797, 796, 794, 799, 797, 795, 796, 799, 798]
drlTanhMakespan19 = [797, 800, 795, 794, 794, 796, 795, 798, 796, 798, 797, 795]
drlTanhMakespan20 = [794, 795, 795, 799, 798, 797, 795, 795, 798, 795, 798, 795]
drlTanhMakespan21 = [796, 795, 795, 795, 795, 797, 798, 794, 797, 795, 796, 794]
drlTanhMakespan22 = [799, 796, 795, 795, 795, 795, 796, 795, 796, 798, 796, 795]
drlTanhMakespan23 = [799, 799, 795, 796, 796, 799, 796, 797, 794, 794, 798, 796]
drlTanhMakespan24 = [795, 795, 797, 800, 797, 795, 795, 796, 795, 795, 798, 799]
drlTanhMakespan25 = [795, 797, 795, 795, 795, 795, 800, 796, 795, 797, 795, 795]
drlTanhMakespan26 = [795, 795, 799, 794, 797, 794, 794, 798, 794, 796, 795, 798]
drlTanhMakespan27 = [796, 796, 795, 796, 798, 797, 794, 795, 794, 794, 794, 798]
drlTanhMakespan28 = [795, 795, 794, 798, 796, 796, 800, 797, 797, 796, 795, 794]
drlTanhMakespan29 = [795, 795, 798, 800, 797, 794, 796, 794, 792, 794, 794, 795]
drlTanhMakespan30 = [798, 797, 795, 799, 797, 800, 798, 799, 797, 800, 794, 796]
drlTanhMakespan31 = [794, 795, 800, 798, 800, 794, 800, 798, 799, 798, 798, 798]
drlTanhMakespan32 = [795, 795, 795, 794, 794, 794, 793, 795, 794, 793, 794, 795]
drlTanhMakespan33 = [794, 797, 792, 794, 795, 795, 797, 795, 795, 794, 792, 795]
drlTanhMakespan34 = [795, 794, 795, 798, 795, 796, 794, 795, 794, 794, 795, 794]
drlTanhMakespan35 = [796, 794, 797, 793, 794, 798, 795, 794, 793, 793, 795, 794]
drlTanhMakespan36 = [795, 795, 794, 795, 795, 795, 794, 795, 795, 793, 795, 794]
drlTanhMakespan37 = [794, 794, 798, 794, 794, 796, 795, 794, 793, 795, 795, 792]
drlTanhMakespan38 = [794, 796, 795, 794, 798, 798, 795, 795, 794, 794, 795, 794]
drlTanhMakespan39 = [794, 795, 795, 796, 792, 794, 795, 794, 795, 794, 794, 795]
drlTanhMakespan40 = [798, 795, 794, 795, 794, 794, 793, 795, 794, 794, 797, 794]
drlTanhMakespan41 = [795, 792, 795, 794, 794, 795, 794, 795, 792, 797, 795, 795]
drlTanhMakespan42 = [792, 794, 794, 795, 794, 794, 795, 794, 792, 794, 794, 794]
drlTanhMakespan43 = [794, 796, 794, 793, 795, 795, 793, 798, 794, 794, 798, 794]
drlTanhMakespan44 = [794, 794, 794, 794, 795, 794, 793, 794, 794, 795, 795, 794]
drlTanhMakespan45 = [790, 794, 793, 794, 793, 794, 795, 794, 791, 795, 795, 794]
drlTanhMakespan46 = [792, 794, 794, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drlTanhMakespan47 = [794, 794, 794, 794, 794, 794, 794, 794, 792, 795, 793, 795]
drlTanhMakespan48 = [794, 794, 792, 792, 797, 794, 792, 794, 794, 795, 794, 795]
drlTanhMakespan49 = [795, 794, 794, 796, 794, 797, 794, 794, 794, 794, 794, 794]
drlTanhMakespan50 = [794, 792, 795, 794, 794, 794, 794, 794, 795, 794, 795, 794]
drlTanhMakespan51 = [794, 792, 796, 795, 794, 794, 795, 794, 795, 795, 795, 794]
drlTanhMakespan52 = [794, 794, 795, 792, 795, 795, 795, 792, 794, 793, 795, 794]
drlTanhMakespan53 = [794, 792, 794, 792, 794, 794, 794, 795, 795, 794, 794, 792]
drlTanhMakespan54 = [795, 793, 794, 794, 794, 792, 795, 794, 794, 792, 794, 796]
drlTanhMakespan55 = [795, 794, 794, 795, 795, 793, 794, 795, 794, 797, 795, 792]
drlTanhMakespan56 = [795, 795, 792, 795, 794, 795, 794, 794, 794, 795, 795, 795]
drlTanhMakespan57 = [795, 792, 795, 794, 795, 795, 792, 795, 794, 797, 792, 792]
drlTanhMakespan58 = [795, 795, 794, 795, 792, 794, 794, 794, 792, 792, 792, 793]
drlTanhMakespan59 = [795, 794, 792, 794, 794, 794, 792, 794, 794, 794, 793, 795]
drlTanhMakespan60 = [794, 795, 795, 795, 798, 794, 794, 794, 794, 794, 794, 792]
drlTanhMakespan61 = [792, 795, 794, 794, 795, 794, 792, 795, 795, 794, 794, 795]
drlTanhMakespan62 = [795, 794, 794, 794, 799, 794, 792, 794, 795, 795, 794, 793]
drlTanhMakespan63 = [791, 795, 792, 796, 794, 794, 792, 795, 793, 794, 792, 794]
drlTanhRewards0 = [-0.17508269018743108, -0.17508269018743108, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17508269018743108, -0.17798286090969018, -0.1768976897689769, -0.17725973169122497, -0.17834394904458598, -0.1776214552648934]
drlTanhRewards1 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlTanhRewards2 = [-0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765]
drlTanhRewards3 = [-0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards4 = [-0.1778021978021978, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards5 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.177078750549934]
drlTanhRewards6 = [-0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387]
drlTanhRewards7 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drlTanhRewards8 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards9 = [-0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards10 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards11 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards12 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards13 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards14 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1749007498897221]
drlTanhRewards15 = [-0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1763540290620872, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128]
drlTanhRewards16 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards17 = [-0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards18 = [-0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards19 = [-0.17562802996914942, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards20 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards21 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards22 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drlTanhRewards23 = [-0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards24 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128]
drlTanhRewards25 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards26 = [-0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765]
drlTanhRewards27 = [-0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards28 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards29 = [-0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards30 = [-0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drlTanhRewards31 = [-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drlTanhRewards32 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards33 = [-0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026]
drlTanhRewards34 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards35 = [-0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards36 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards37 = [-0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards38 = [-0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108]
drlTanhRewards41 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards42 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards43 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards44 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards45 = [-0.1749007498897221, -0.17435444714191128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17453662842012357, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards46 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards49 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards50 = [-0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards51 = [-0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards52 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards53 = [-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards54 = [-0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387]
drlTanhRewards55 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards56 = [-0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards57 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17471872931833224]
drlTanhRewards58 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17471872931833224, -0.1749007498897221]
drlTanhRewards59 = [-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards60 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards61 = [-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards62 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drlTanhRewards63 = [-0.17453662842012357, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108]
# Deep Reinforcement Learning: 5 Dense layers, relu activation function, 12 episodes, 50 iterations
drlReluMakespan0 = [796, 798, 809, 798, 796, 800, 798, 799, 800, 794, 800, 798]
drlReluMakespan1 = [800, 800, 801, 806, 804, 806, 808, 798, 796, 796, 798, 800]
drlReluMakespan2 = [805, 805, 798, 800, 800, 798, 801, 799, 800, 806, 800, 800]
drlReluMakespan3 = [798, 799, 798, 795, 798, 808, 803, 800, 798, 795, 799, 800]
drlReluMakespan4 = [805, 805, 799, 796, 798, 803, 799, 800, 800, 800, 795, 794]
drlReluMakespan5 = [799, 796, 795, 800, 801, 796, 800, 795, 803, 800, 800, 805]
drlReluMakespan6 = [799, 795, 798, 794, 805, 796, 795, 799, 798, 795, 804, 796]
drlReluMakespan7 = [795, 798, 799, 798, 798, 799, 795, 794, 796, 794, 795, 805]
drlReluMakespan8 = [805, 794, 794, 795, 798, 795, 798, 795, 799, 800, 796, 798]
drlReluMakespan9 = [797, 797, 797, 794, 795, 794, 794, 797, 796, 795, 801, 799]
drlReluMakespan10 = [799, 794, 797, 795, 794, 794, 795, 795, 795, 796, 797, 799]
drlReluMakespan11 = [796, 798, 800, 795, 805, 794, 798, 796, 795, 794, 798, 795]
drlReluMakespan12 = [800, 795, 794, 798, 800, 805, 800, 798, 804, 799, 794, 803]
drlReluMakespan13 = [796, 799, 798, 794, 800, 794, 795, 796, 798, 795, 794, 799]
drlReluMakespan14 = [795, 798, 798, 798, 805, 798, 798, 798, 795, 794, 800, 796]
drlReluMakespan15 = [795, 798, 795, 805, 798, 794, 795, 798, 796, 794, 795, 796]
drlReluMakespan16 = [798, 795, 796, 799, 796, 798, 798, 795, 795, 795, 795, 799]
drlReluMakespan17 = [794, 798, 796, 798, 795, 801, 794, 798, 797, 795, 796, 801]
drlReluMakespan18 = [798, 795, 798, 798, 801, 798, 795, 795, 797, 800, 794, 800]
drlReluMakespan19 = [795, 798, 794, 800, 796, 795, 798, 797, 795, 794, 796, 796]
drlReluMakespan20 = [794, 794, 795, 795, 795, 795, 796, 798, 799, 799, 799, 795]
drlReluMakespan21 = [802, 796, 794, 797, 797, 800, 794, 794, 804, 803, 798, 797]
drlReluMakespan22 = [794, 795, 795, 795, 798, 795, 794, 799, 794, 803, 795, 794]
drlReluMakespan23 = [794, 798, 799, 794, 795, 795, 799, 795, 796, 795, 797, 799]
drlReluMakespan24 = [795, 794, 797, 800, 794, 795, 795, 795, 795, 800, 800, 798]
drlReluMakespan25 = [795, 794, 797, 796, 798, 795, 795, 794, 799, 795, 794, 798]
drlReluMakespan26 = [801, 795, 800, 794, 794, 796, 800, 798, 798, 799, 794, 796]
drlReluMakespan27 = [796, 795, 796, 795, 796, 795, 795, 800, 794, 794, 794, 796]
drlReluMakespan28 = [794, 794, 795, 796, 794, 795, 795, 797, 794, 794, 796, 795]
drlReluMakespan29 = [793, 794, 795, 800, 795, 795, 794, 798, 798, 796, 795, 794]
drlReluMakespan30 = [802, 794, 794, 798, 794, 796, 805, 794, 800, 794, 796, 794]
drlReluMakespan31 = [797, 794, 794, 794, 800, 800, 794, 794, 798, 795, 794, 798]
drlReluMakespan32 = [794, 798, 794, 795, 794, 795, 798, 794, 794, 795, 794, 798]
drlReluMakespan33 = [798, 794, 798, 795, 794, 793, 797, 798, 794, 794, 801, 793]
drlReluMakespan34 = [794, 798, 794, 795, 794, 793, 798, 795, 794, 800, 794, 795]
drlReluMakespan35 = [794, 796, 794, 796, 806, 795, 795, 795, 796, 795, 795, 799]
drlReluMakespan36 = [795, 794, 794, 796, 796, 798, 794, 796, 794, 795, 794, 795]
drlReluMakespan37 = [795, 794, 795, 798, 794, 794, 794, 794, 794, 794, 795, 797]
drlReluMakespan38 = [794, 798, 794, 798, 797, 794, 794, 795, 795, 794, 795, 795]
drlReluMakespan39 = [797, 794, 795, 796, 796, 796, 798, 794, 794, 795, 794, 798]
drlReluMakespan40 = [798, 795, 795, 798, 792, 795, 795, 794, 795, 794, 798, 794]
drlReluMakespan41 = [795, 794, 794, 794, 794, 794, 798, 793, 794, 794, 794, 793]
drlReluMakespan42 = [794, 794, 794, 794, 799, 794, 795, 794, 796, 794, 794, 794]
drlReluMakespan43 = [794, 797, 795, 794, 795, 794, 794, 795, 794, 794, 793, 794]
drlReluMakespan44 = [794, 792, 793, 794, 794, 796, 794, 798, 795, 794, 794, 796]
drlReluMakespan45 = [795, 794, 799, 794, 794, 793, 794, 795, 795, 793, 796, 794]
drlReluMakespan46 = [794, 796, 794, 794, 794, 794, 794, 793, 799, 792, 794, 794]
drlReluMakespan47 = [795, 794, 793, 794, 796, 797, 794, 794, 795, 794, 794, 794]
drlReluMakespan48 = [794, 794, 794, 792, 794, 794, 795, 794, 794, 794, 794, 794]
drlReluMakespan49 = [794, 794, 795, 792, 797, 797, 794, 794, 792, 800, 795, 795]
drlReluRewards0 = [-0.17544633017412387, -0.17580964970257765, -0.1778021978021978, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards1 = [-0.17617264919621228, -0.17617264919621228, -0.1763540290620872, -0.17725973169122497, -0.1768976897689769, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228]
drlReluRewards2 = [-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228]
drlReluRewards3 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1776214552648934, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228]
drlReluRewards4 = [-0.177078750549934, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17671654929577466, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108]
drlReluRewards5 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.1763540290620872, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934]
drlReluRewards6 = [-0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.177078750549934, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.17544633017412387]
drlReluRewards7 = [-0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.177078750549934]
drlReluRewards8 = [-0.177078750549934, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drlReluRewards9 = [-0.17562802996914942, -0.17562802996914942, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128]
drlReluRewards10 = [-0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlReluRewards11 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.177078750549934, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026]
drlReluRewards12 = [-0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466]
drlReluRewards13 = [-0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128]
drlReluRewards14 = [-0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387]
drlReluRewards15 = [-0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drlReluRewards16 = [-0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards17 = [-0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1763540290620872]
drlReluRewards18 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drlReluRewards19 = [-0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drlReluRewards20 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026]
drlReluRewards21 = [-0.17653532907770195, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.17562802996914942]
drlReluRewards22 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466, -0.17526455026455026, -0.17508269018743108]
drlReluRewards23 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128]
drlReluRewards24 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards25 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards26 = [-0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387]
drlReluRewards27 = [-0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards28 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drlReluRewards29 = [-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlReluRewards30 = [-0.17653532907770195, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108]
drlReluRewards31 = [-0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765]
drlReluRewards32 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards33 = [-0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.1763540290620872, -0.1749007498897221]
drlReluRewards34 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026]
drlReluRewards35 = [-0.17508269018743108, -0.17544633017412387, -0.17725973169122497, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards36 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlReluRewards37 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942]
drlReluRewards38 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drlReluRewards39 = [-0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlReluRewards41 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drlReluRewards42 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards43 = [-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108]
drlReluRewards44 = [-0.17508269018743108, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards45 = [-0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17544633017412387, -0.17508269018743108]
drlReluRewards46 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108]
drlReluRewards47 = [-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards49 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026]
if __name__ == "__main__":
##############################################
##############################################
##############################################
# Deep Recurrent Reinforcement Learning with 1 GRU layer and 4 Dense layers
drnnGRUtanhMakespan = []
drnnGRUtanhRewards = []
drnnGRUtanhMakespanList = []
drnnGRUtanhRewardsList = []
drnnGRUtanhMakespanValues = []
drnnGRUtanhRewardsValues = []
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan0))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan1))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan2))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan3))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan4))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan5))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan6))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan7))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan8))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan9))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan10))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan11))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan12))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan13))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan14))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan15))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan16))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan17))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan18))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan19))
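# Editor's sketch: the twenty append calls above could also be generated with a list
# comprehension; `example_runs` and `example_means` below are hypothetical names and
# only the first three runs are shown to keep the illustration short.
example_runs = [drnnGRUtanhMakespan0, drnnGRUtanhMakespan1, drnnGRUtanhMakespan2]
example_means = [np.mean(run) for run in example_runs]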
import numpy as np
import pybullet as p
from .base_sensor import DirectionalSensor
from .distance_sensor import DistanceSensor3D
from spike_swarm_sim.register import sensor_registry
from spike_swarm_sim.objects import Robot, Robot3D
from spike_swarm_sim.utils import compute_angle, angle_diff, issubclass_of_any, circle_distance
from .utils.propagation import ExpDecayPropagation
@sensor_registry(name='IR_receiver2')
class IRCommunicationReceiver(DirectionalSensor):
""" Communication Receiver mimicking IR technology.
========================================================================
- Args:
msg_length [int] : number of components of the message. Must be the
same as in the Transmitter definition.
max_hops [int] : maximum number of hops before frame discard.
========================================================================
"""
def __init__(self, *args, msg_length=1, max_hops=10, selection_scheme='random', **kwargs):
super(IRCommunicationReceiver, self).__init__(*args, **kwargs)
self.msg_length = msg_length
self.max_hops = max_hops
self.selection_scheme = selection_scheme
self.current_direction = 0
self.propagation = ExpDecayPropagation(rho_att=0.7, phi_att=1.)
self.current_sender = None
self.aux_t = 0
def _target_filter(self, obj):
""" Filtering of potential sender robots. """
return issubclass_of_any(obj, [Robot, Robot3D]) and 'wireless_transmitter' in obj.actuators
def _step_direction(self, rho, phi, direction_reading, direction, obj=None, diff_vector=None):
""" Step the sensor of a sector, receiving the frame messages and the underlying
context. For a detailed explanation of this method see DirectionalSensor._step_direction.
"""
condition = obj is not None\
and rho <= self.range\
and phi <= self.aperture\
and rho <= obj.actuators['wireless_transmitter'].range\
and obj.actuators['wireless_transmitter'].frame['enabled']
#* Fill initial reading with empty frame
if direction_reading is None:
direction_reading = self.empty_msg
if condition:
signal_strength = self.propagation(rho, phi)
if signal_strength > direction_reading['signal']:
my_pos = self.get_position(self.sensors_idx[direction]) + np.r_[0, 0, 0.1] #+ np.r_[0, 0, 0.017]
tar_pos = obj.position + np.r_[0, 0, 0.07] # my_pos[2]]
ray_res = p.rayTest(my_pos, tar_pos, physicsClientId=self.sensor_owner.physics_client)[0][0]
if ray_res == obj.id:
sending_direction = 0 #!np.argmin([angle_diff(sdir, compute_angle(diff_vector) + np.pi) for sdir in self.directions(obj.orientation)])
sending_angle = self.directions(0.)[sending_direction]
receiving_angle = self.directions(0.)[direction]
direction_reading['sending_direction'] = np.r_[np.cos(sending_angle), np.sin(sending_angle)
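# Editor's note: in the branch above, a frame replaces the current reading for a sector
# only if its ExpDecayPropagation signal strength exceeds the one already stored, and a
# pybullet rayTest from receiver to sender must hit the sender (ray_res == obj.id), so
# occluded robots are ignored. The excerpt is truncated by the source at this point.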
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as clr
from scipy import signal
import tensorflow as tf
# Number of sections
M = 3
# Sample rate
f_s = 24000
# Passband center frequency
f0 = 9000
# Number of frequencies to compute
N = 2048
section_colors = np.zeros([M, 3])
for k in range(M):
section_colors[k] = clr.hsv_to_rgb([(k / (M - 1.0)) / 3.0, 0.5, 0.75])
# Get one of BP poles that maps to LP prototype pole.
def lp_to_bp(s, rbw, w0):
return w0 * (s * rbw / 2 + 1j * np.sqrt(1.0 - np.power(s * rbw / 2, 2)))
# Frequency response
def freq_response(z, b, a):
p = b[0]
q = a[0]
for k in range(1, len(b)):
p += b[k] * np.power(z, -k)
for k in range(1, len(a)):
q += a[k] * np.power(z, -k)
return p / q
# Absolute value in decibel
def abs_db(h):
return 20 * np.log10(np.abs(h))
# Poles of analog low-pass prototype
none, S, none = signal.buttap(M)
# Band limits
c = np.power(2, 1 / 12.0)
f_l = f0 / c
f_u = f0 * c
# Analog frequencies in radians
w0 = 2 * np.pi * f0
w_l = 2 * np.pi * f_l
w_u = 2 * np.pi * f_u
# Relative bandwidth
rbw = (w_u - w_l) / w0
jw0 = 2j * np.pi * f0
z0 = np.exp(jw0 / f_s)
# 1. Analog filter parameters
bc, ac = signal.butter(M, [w_l, w_u], btype='bandpass', analog=True)
ww, H_a = signal.freqs(bc, ac, worN=N)
magnH_a = np.abs(H_a)
f = ww / (2 * np.pi)
omega_d = ww / f_s
z = np.exp(1j * ww / f_s)
# 2. Initial filter design
a = np.zeros([M, 3], dtype=np.double)
b = np.zeros([M, 3], dtype=np.double)
hd = np.zeros([M, N], dtype=np.complex)
# Pre-warp the frequencies
w_l_pw = 2 * f_s * np.tan(np.pi * f_l / f_s)
w_u_pw = 2 * f_s * np.tan(np.pi * f_u / f_s)
w_0_pw = np.sqrt(w_l_pw * w_u_pw)
rbw_pw = (w_u_pw - w_l_pw) / w_0_pw
poles_pw = lp_to_bp(S, rbw_pw, w_0_pw)
# Bilinear transform
T = 1.0 / f_s
poles_d = (1.0 + poles_pw * T / 2) / (1.0 - poles_pw * T / 2)
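# Editor's note: the line above is the bilinear transform z = (1 + s*T/2) / (1 - s*T/2),
# which maps each pre-warped analog pole into the unit circle of the z-plane.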
for k in range(M):
p = poles_d[k]
b[k], a[k] = signal.zpk2tf([-1, 1], [p, np.conj(p)], 1)
g0 = freq_response(z0, b[k], a[k])
g0 = np.abs(g0)
b[k] /= g0
none, hd[k] = signal.freqz(b[k], a[k], worN=omega_d)
plt.figure(1)
plt.title("Initial digital filter (bilinear)")
plt.axis([0, f_s / 2, -90, 10])
plt.plot(f, abs_db(H_a), label='Target response', color='gray', linewidth=0.5)
for k in range(M):
label = "Section %d" % k
plt.plot(f, abs_db(hd[k]), color=section_colors[k], alpha=0.5, label=label)
# Combined frequency response of initial digital filter
Hd = np.prod(hd, axis=0)
plt.plot(f, abs_db(Hd), 'k', label='Cascaded filter')
plt.legend(loc='upper left')
plt.savefig('01_initial_h.svg')
plt.figure(2)
plt.title("Initial filter - poles and zeros")
plt.axis([-3, 3, -2.25, 2.25])
unitcircle = plt.Circle((0, 0), 1, color='lightgray', fill=False)
ax = plt.gca()
ax.add_artist(unitcircle)
for k in range(M):
zeros, poles, gain = signal.tf2zpk(b[k], a[k])
plt.plot(np.real(poles), np.imag(poles), 'x', color=section_colors[k])
plt.plot(np.real(zeros), np.imag(zeros), 'o', color='none', markeredgecolor=section_colors[k], alpha=0.5)
plt.savefig('02_initial_zp.svg')
# Optimizing filter
tH_a = tf.constant(magnH_a, dtype=tf.float32)
# Assign weights
weight = np.zeros(N)
for i in range(N):
# In the passband or below?
if (f[i] <= f_u):
weight[i] = 1.0
with tf.device('/cpu:0'):
tWeight = tf.constant(weight, dtype=tf.float32)
tZ = tf.placeholder(tf.complex64, [1, N])
# Variables to be changed by optimizer
ta = tf.Variable(a)
tb = tf.Variable(b)
# TF requires matching types for multiplication;
# cast real coefficients to complex
cta = tf.cast(ta, tf.complex64)
ctb = tf.cast(tb, tf.complex64)
xb0 = tf.reshape(ctb[:, 0], [M, 1])
xb1 = tf.reshape(ctb[:, 1], [M, 1])
xb2 = tf.reshape(ctb[:, 2], [M, 1])
xa0 = tf.reshape(cta[:, 0], [M, 1])
xa1 = tf.reshape(cta[:, 1], [M, 1])
xa2 = tf.reshape(cta[:, 2], [M, 1])
# Numerator: B = b₀z² + b₁z + b₂
tB = tf.matmul(xb0, tf.square(tZ)) + tf.matmul(xb1, tZ) + xb2
# Denominator: A = a₀z² + a₁z + a₂
tA = tf.matmul(xa0, tf.square(tZ)) + tf.matmul(xa1, tZ) + xa2
# Get combined frequency response
tH = tf.reduce_prod(tB / tA, axis=0)
iterations = 2000
learning_rate = 0.0005
# Loss function
loss = tf.reduce_mean(tWeight * tf.squared_difference(tf.abs(tH), tH_a))
optimizer = tf.train.AdamOptimizer(learning_rate)
updates = optimizer.minimize(loss)
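# Editor's note: the loss above is a frequency-weighted mean squared error between the
# magnitude of the cascaded digital response |H| and the analog target magnitude;
# only bins with f <= f_u receive nonzero weight (see `weight` above), so the fit
# concentrates on the passband and the frequencies below it.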
zz = np.reshape(z, [1, N])
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
for i in range(iterations):
loss_val, _ = sess.run([loss, updates], feed_dict={tZ: zz})
if (i % 100 == 0):
print(" Loss: ", loss_val)
b, a = sess.run([tb, ta])
for k in range(M):
none, hd[k] = signal.freqz(b[k], a[k], worN=omega_d)
plt.figure(3)
plt.title("Optimized digital filter")
plt.axis([0, f_s / 2, -90, 10])
# Draw the band limits
plt.axvline(f_l, color='black', linewidth=0.5, linestyle='--')
plt.axvline(f_u, color='black', linewidth=0.5, linestyle='--')
plt.plot(f, abs_db(H_a), label='Target response', color='gray', linewidth=0.5)
Hd = np.prod(hd, axis=0)
for k in range(M):
label = "Section %d" % k
plt.plot(f, abs_db(hd[k]), color=section_colors[k], alpha=0.5, label=label)
magnH_d = np.abs(Hd)
"""Returns simulation from component."""
import inspect
import warnings
from typing import Any, Dict, Optional
import meep as mp
import numpy as np
import pydantic
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.extension import move_polar_rad_copy
from gdsfactory.simulation.gmeep.get_material import get_material
from gdsfactory.tech import LAYER_STACK, LayerStack
mp.verbosity(0)
sig = inspect.signature(mp.Simulation)
settings_meep = set(sig.parameters.keys())
@pydantic.validate_arguments
def get_simulation(
component: Component,
resolution: int = 30,
extend_ports_length: Optional[float] = 10.0,
layer_stack: LayerStack = LAYER_STACK,
zmargin_top: float = 3.0,
zmargin_bot: float = 3.0,
tpml: float = 1.5,
clad_material: str = "SiO2",
is_3d: bool = False,
wavelength_start: float = 1.5,
wavelength_stop: float = 1.6,
wavelength_points: int = 50,
dfcen: float = 0.2,
port_source_name: str = "o1",
port_field_monitor_name: str = "o2",
port_margin: float = 3,
distance_source_to_monitors: float = 0.2,
port_source_offset: float = 0,
port_monitor_offset: float = 0,
dispersive: bool = False,
**settings,
) -> Dict[str, Any]:
r"""Returns Simulation dict from gdsfactory Component
based on meep directional coupler example
https://meep.readthedocs.io/en/latest/Python_Tutorials/GDSII_Import/
https://support.lumerical.com/hc/en-us/articles/360042095873-Metamaterial-S-parameter-extraction
.. code::
top view
________________________________
| |
| xmargin_left | port_extension
|<------> port_margin ||<-->
___|___________ _________||___
| \ / |
| \ / |
| ====== |
| / \ |
___|___________/ \__________|___
| | <-------->|
| |ymargin_bot xmargin_right|
| | |
|___|___________________________|
side view
________________________________
| | |
| | |
| zmargin_top |
|ymargin | |
|<---> _____ _|___ |
| | | | | |
| | | | | |
| |_____| |_____| |
| | |
| | |
| |zmargin_bot |
| | |
|_______|_______________________|
Args:
component: gf.Component
resolution: in pixels/um (20: for coarse, 120: for fine)
extend_ports_length: to extend ports beyond the PML
layer_stack: Dict of layer number (int, int) to thickness (um)
zmargin_top: thickness for cladding above core
zmargin_bot: thickness for cladding below core
tpml: PML thickness (um)
clad_material: material for cladding
is_3d: if True runs in 3D
wavelength_start: wavelength min (um)
wavelength_stop: wavelength max (um)
wavelength_points: wavelength steps
dfcen: delta frequency
port_source_name: input port name
port_field_monitor_name: name of the port where the field monitor is placed
port_margin: margin on each side of the port
distance_source_to_monitors: distance (um) the source is placed before the monitors
port_source_offset: offset between source GDS port and source MEEP port
port_monitor_offset: offset between monitor GDS port and monitor MEEP port
dispersive: use dispersive material models (requires higher resolution)
Keyword Args:
settings: other parameters for sim object (resolution, symmetries, etc.)
Returns:
simulation dict: sim, monitors, sources
Make sure you review the simulation before you simulate a component
.. code::
import gdsfactory as gf
import gdsfactory.simulation.meep as gm
c = gf.components.bend_circular()
gm.write_sparameters_meep(c, run=False)
"""
for setting in settings.keys():
if setting not in settings_meep:
raise ValueError(f"{setting} not in {settings_meep}")
layer_to_thickness = layer_stack.get_layer_to_thickness()
layer_to_material = layer_stack.get_layer_to_material()
layer_to_zmin = layer_stack.get_layer_to_zmin()
layer_to_sidewall_angle = layer_stack.get_layer_to_sidewall_angle()
component_ref = component.ref()
component_ref.x = 0
component_ref.y = 0
wavelengths = np.linspace(wavelength_start, wavelength_stop, wavelength_points)
port_names = list(component_ref.ports.keys())
if port_source_name not in port_names:
warnings.warn(f"port_source_name={port_source_name!r} not in {port_names}")
port_source = component_ref.get_ports_list()[0]
port_source_name = port_source.name
warnings.warn(f"Selecting port_source_name={port_source_name!r} instead.")
if port_field_monitor_name not in component_ref.ports:
warnings.warn(
f"port_field_monitor_name={port_field_monitor_name!r} not in {port_names}"
)
port_field_monitor = (
component_ref.get_ports_list()[0]
if len(component.ports) < 2
else component.get_ports_list()[1]
)
port_field_monitor_name = port_field_monitor.name
warnings.warn(
f"Selecting port_field_monitor_name={port_field_monitor_name!r} instead."
)
assert isinstance(
component, Component
), f"component needs to be a gf.Component, got Type {type(component)}"
component_extended = (
gf.components.extension.extend_ports(
component=component, length=extend_ports_length, centered=True
)
if extend_ports_length
else component
)
gf.show(component_extended)
component_extended.flatten()
component_extended = component_extended.ref()
# geometry_center = [component_extended.x, component_extended.y]
# geometry_center = [0, 0]
# print(geometry_center)
layers_thickness = [
layer_to_thickness[layer]
for layer in component.layers
if layer in layer_to_thickness
]
t_core = max(layers_thickness)
cell_thickness = tpml + zmargin_bot + t_core + zmargin_top + tpml if is_3d else 0
cell_size = mp.Vector3(
component.xsize + 2 * tpml,
component.ysize + 2 * tpml,
cell_thickness,
)
geometry = []
layer_to_polygons = component_extended.get_polygons(by_spec=True)
for layer, polygons in layer_to_polygons.items():
if layer in layer_to_thickness and layer in layer_to_material:
height = layer_to_thickness[layer] if is_3d else mp.inf
zmin_um = layer_to_zmin[layer] if is_3d else 0
# center = mp.Vector3(0, 0, (zmin_um + height) / 2)
for polygon in polygons:
vertices = [mp.Vector3(p[0], p[1], zmin_um) for p in polygon]
material_name = layer_to_material[layer]
material = get_material(name=material_name, dispersive=dispersive)
geometry.append(
mp.Prism(
vertices=vertices,
height=height,
sidewall_angle=layer_to_sidewall_angle[layer],
material=material,
# center=center
)
)
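# Editor's note: MEEP works in normalized units; with lengths expressed in um here,
# frequency is simply 1/wavelength (um^-1), which is what the next line computes.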
freqs = 1 / wavelengths
fcen = np.mean(freqs)
frequency_width = dfcen * fcen
# Add source
port = component_ref.ports[port_source_name]
angle_rad = np.radians(port.orientation)
width = port.width + 2 * port_margin
size_x = width * abs(np.sin(angle_rad))
from data.raw_data_loader.base.base_raw_data_loader import SeqTaggingRawDataLoader
import os
import h5py
import json
import numpy as np
class RawDataLoader(SeqTaggingRawDataLoader):
def __init__(self, data_path):
super().__init__(data_path)
self.wp2_data_path = "aij-wikiner-en-wp2"
self.wp3_data_path = "aij-wikiner-en-wp3"
self.all_deps = dict()
def load_data(self):
if (
len(self.X) == 0
or len(self.Y) == 0
or self.attributes["label_vocab"] is None
):
total_size = self.process_data_file(
os.path.join(self.data_path, self.wp2_data_path)
)
total_size += self.process_data_file(
os.path.join(self.data_path, self.wp3_data_path)
)
self.attributes["index_list"] = [i for i in range(total_size)]
self.attributes["label_vocab"] = dict()
for labels in self.Y.values():
for label in labels:
if label not in self.attributes["label_vocab"]:
self.attributes["label_vocab"][label] = len(
self.attributes["label_vocab"]
)
def process_data_file(self, file_path):
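# Editor's note: as parsed below, every line after the first holds space-separated
# tokens of the form word|tag|NER-label (aij-wikiner format); the middle field is
# stored in `all_deps` and the label in `Y`.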
cnt = 0
with open(file_path, "r") as f:
for i, line in enumerate(f):
if i != 0:
line = line.strip()
if line:
single_x = []
single_y = []
single_dep = []
tokens = line.split(" ")
for token in tokens:
word, dep, label = token.split("|")
single_x.append(word)
single_y.append(label)
single_dep.append(dep)
assert len(self.X) == len(self.Y) == len(self.all_deps)
idx = len(self.X)
self.X[idx] = single_x
self.Y[idx] = single_y
self.all_deps[idx] = single_dep
cnt += 1
return cnt
def generate_h5_file(self, file_path):
f = h5py.File(file_path, "w")
f["attributes"] = json.dumps(self.attributes)
utf8_type = h5py.string_dtype("utf-8", None)
for key in self.X.keys():
f["X/" + str(key)] = np.array(self.X[key], dtype=utf8_type)
f["Y/" + str(key)] = np.array(self.Y[key], dtype=utf8_type)
f["all_deps/" + str(key)] = | np.array(self.all_deps[key], dtype=utf8_type) | numpy.array |
# coding=utf-8
"""Circles, collisions and gravity"""
import glfw
from OpenGL.GL import *
import OpenGL.GL.shaders
import numpy as np
import random
import sys
import os.path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import grafica.basic_shapes as bs
import grafica.easy_shaders as es
import grafica.transformations as tr
import grafica.performance_monitor as pm
# Define the Controller class
class Controller:
def __init__(self):
self.fillPolygon = True
self.x = -0.5
self.y = 0.0
self.jump = False
self.g = np.array([0.0, -1.0], dtype=np.float32)
self.vy = 0
# global controller as communication with the callback function
controller = Controller()
class Shape:
def __init__(self, vertices, indices, textureFileName=None):
self.vertices = vertices
self.indices = indices
self.textureFileName = textureFileName
# This function will be executed whenever a key is pressed or released
def on_key(window, key, scancode, action, mods):
global controller
if key == glfw.KEY_SPACE and action ==glfw.PRESS:
controller.jump = True
elif key == glfw.KEY_ESCAPE:
glfw.set_window_should_close(window, True)
# Case where the window is closed
elif key == glfw.KEY_ESCAPE and action ==glfw.PRESS:
glfw.set_window_should_close(window, True)
def createCircle(r,g,b,N):
# First vertex at the center, white color
vertices = [0, 0, 0, r, g, b]
indices = []
dtheta = 2 * np.pi / N
for i in range(N):
theta = i * dtheta
if r<1.0 and g<1.0 and b<1.0:
vertices += [
# vertex coordinates
0.5 * np.cos(theta), 0.5 * np.sin(theta), 0,
# color components vary between 0 and 1
r+0.5*np.sin(theta),
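# Editor's note (hedged): the excerpt above is truncated by the source; a circle built
# this way is typically completed by appending the remaining color components and the
# triangle-fan indices per segment, but the author's original continuation is not shown here.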
from tkinter import filedialog
from tkinter import *
from tkinter import ttk
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
import numpy as np
from astropy.io import fits
import matplotlib.colors
import cv2
from astropy.visualization.mpl_normalize import simple_norm
import math
from skimage import exposure
from astropy.wcs import WCS
import os
import warnings
from tkinter import messagebox as MessageBox
from astropy.stats import sigma_clip
from scipy import interpolate
from astropy.coordinates import SkyCoord
from PIL import Image
from PIL import ImageTk
import matplotlib.patches as patches
from tkinter import scrolledtext
import platform
warnings.filterwarnings("ignore")
class prueba_ifsUW():
file_dir = ''
band = 0
name = ''
hi_data = ''
data = ''
header_file = ''
wcs_header = ''
size_x = 0
size_y = 0
pixels = 1
x_ticks = []
y_ticks = []
x_ticks_l = []
y_ticks_l = []
Integrated_spectrum =[]
integrated_x = []
integrated_y = []
wcs = 0
arrlambda = np.zeros(pixels)
array_data = 0
dband=0
res = []
cir_x = 0
cir_y = 0
name_f = ""
spectrum= 0
min_value_da= 0
max_value_da= 0
####Flags
flag_explorer = 0
flag_flux=0
flag_wave=0
flag_band=0
flag_file=0
flag_integrate_region = 0
flag_integrate_region2 = 0
flag_create_fits = 0
flag_system = 0
red_marks = []
maps_array = []
maps_array_inv = []
ax1 = 0
ax0 = 0
saved_image = 0
imagen_final = 0
color = "#E6E6FA"
##variables for the graphic part
window = Tk()
window.title("IFS Explorer")
window.geometry("1350x730")
window.resizable(0, 0)
radius_ = IntVar()
band_sticks = IntVar()
min_value_la = 0
max_value_la = 0
varla1 = StringVar()
varlaflux = StringVar()
varla3 = DoubleVar()
varla4 = DoubleVar()
varlawave = StringVar()
varla5 = IntVar()
varla6 = IntVar()
varla7 = StringVar()
varla8 = StringVar()
varla9 = StringVar()
varla10 = StringVar()
varla11 = DoubleVar()
var = IntVar()
var.set(1)
var3 = StringVar()
sp1 = IntVar()
##graphic elements
box_entry = 0
entry_Radius = 0
entry_wvlenMin = 0
entry_wvlenMax = 0
entry_MWave = 0
entry_shFiltmin = 0
entry_shFiltmax = 0
entry_shFilt = 0
bar_ = 0
canvas = 0
f2 = ''
canvas2 = 0
f = 0
combo1 = 0
combo2 = 0
def __init__(self):
'''
function that defines the type of operating system, which is needed to know how to
access files such as images or filters
'''
def operative_system():
sistema = platform.system()
if sistema == "Windows":
(self.flag_system) = 0
else:
(self.flag_system) = 1
'''
function that defines maps_array and maps_array_inv, which are required by the
set_color_map() function
'''
def get_cmaps():
# create the color maps only once:
prism = matplotlib.colors.LinearSegmentedColormap.from_list('custom prism', [(0, "white"),(0.2, '#000000'),(0.4, '#8b0000'),(0.6, '#f63f2b'),(0.8, '#15E818'),(1, '#1139d9' )], N=256)
stern = matplotlib.colors.LinearSegmentedColormap.from_list('custom stern',[(0, "white"),(0.2, '#8b0000'),(0.3, '#e42121'),(0.4, '#252850'),(0.6, '#0588EF'), (0.8, '#3b83bd'),(1, '#c6ce00' )], N=256)
std = matplotlib.colors.LinearSegmentedColormap.from_list('custom Std-Gamma', [(0, "white"),(0.2, '#0000ff'),(0.4, '#2178E4'),(0.6, '#ff0000'),(0.8, '#ff8000'),(1, '#ffff00' )], N=256)
BGRY = matplotlib.colors.LinearSegmentedColormap.from_list('custom BGRY', [(0, "white"),(0.2, '#ff8000'),(0.4, '#EFEE05'),(0.6, '#EF5A05'),(0.8, '#51EF05'),(1, '#0000ff' )], N=256)
califa = matplotlib.colors.LinearSegmentedColormap.from_list('custom CALIFA special', [(0, "white"),(0.25, '#00008B'),(0.5, '#B2FFFF'),(0.62, '#B2FFFF'),(0.75, '#ff4000'),(1, '#008f39' )], N=256)
ping = matplotlib.colors.LinearSegmentedColormap.from_list('custom Pingsoft-special', [(0, "white"),(0.25, '#00008B'),(0.5, '#3b83bd'),(0.75, '#ff8000'),(1, '#ffff00' )], N=256)
prism_r= matplotlib.colors.LinearSegmentedColormap.from_list('custom prism inv', [(0, '#1139d9'),(0.2, '#15E818'),(0.4, '#f63f2b'),(0.6, '#8b0000'),(0.8, '#000000'),(1, "white" )], N=256)
stern_r= matplotlib.colors.LinearSegmentedColormap.from_list('custom strn inv', [(0, '#c6ce00'),(0.2, '#3b83bd'),(0.3, '#0588EF'),(0.4, '#252850'),(0.6, '#e42121'),(0.8, '#8b0000'),(1, "white" )], N=256)
std_r= matplotlib.colors.LinearSegmentedColormap.from_list('custom std inv', [(0, '#ffff00'),(0.2, '#ff8000'),(0.4, '#ff0000'),(0.6, '#2178E4'),(0.8, '#0000ff'),(1, "white" )], N=256)
BGRY_r= matplotlib.colors.LinearSegmentedColormap.from_list('custom BGRY inv', [(0, '#0000ff'),(0.2, '#51EF05'),(0.4, '#EF5A05'),(0.6, '#EFEE05'),(0.8, '#ff8000'),(1, "white" )], N=256)
califa_r= matplotlib.colors.LinearSegmentedColormap.from_list('custom CALIFA inv', [(0, '#008f39'),(0.25, '#ff4000'),(0.5, '#B2FFFF'),(0.62, '#B2FFFF'),(0.75, '#00008B'),(1, "white" )], N=256)
ping_r= matplotlib.colors.LinearSegmentedColormap.from_list('custom Pingsoft inv', [(0, '#ffff00'),(0.25, '#ff8000'),(0.5, '#3b83bd'),(0.75, '#00008B'), (1, "white" )], N=256)
(self.maps_array).append('Blues')
(self.maps_array).append('Reds')
(self.maps_array).append('Greens')
(self.maps_array).append('Greys')
(self.maps_array).append(ping)
(self.maps_array).append(califa)
(self.maps_array).append('rainbow')
(self.maps_array).append(BGRY)
(self.maps_array).append(prism)
(self.maps_array).append(stern)
(self.maps_array).append(std)
(self.maps_array_inv).append('Blues_r')
(self.maps_array_inv).append('Reds_r')
(self.maps_array_inv).append('Greens_r')
(self.maps_array_inv).append('Greys_r')
(self.maps_array_inv).append(ping_r)
(self.maps_array_inv).append(califa_r)
(self.maps_array_inv).append('rainbow_r')
(self.maps_array_inv).append(BGRY_r)
(self.maps_array_inv).append(prism_r)
(self.maps_array_inv).append(stern_r)
(self.maps_array_inv).append(std_r)
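# Editor's note: maps_array and maps_array_inv are index-aligned, so the colormap
# combobox index (combo1.current()) selects the matching normal or inverted map in
# set_scaling(), depending on the sp1 checkbox.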
'''
Function to handle clicks inside the canvas that displays the FITS image
and to control spaxel selection for integrate_region()
'''
def onclick_(event):
if (self.flag_file)==1:
if (self.flag_integrate_region) == 0:
if (self.flag_explorer) == 1:
(self.flag_explorer) = 0
(self.varla10).set("Explorer OFF")
else:
(self.flag_explorer) = 1
(self.varla10).set("Explorer ON")
else:
if (self.flag_integrate_region2) == 0:
try:
cord_x = int(round(event.xdata))
cord_y = int(round(event.ydata))
draw_circle(int(round(event.xdata)), int(round(event.ydata)))
except Exception as e:
print(e)
'''
Function to track mouse movement within the canvas that shows the FITS image;
when the explorer is active and the mouse is above a spaxel, it calls the function
coordinates_()
'''
def move_mouse(event):
if (self.flag_explorer) == 1:
try:
cord_x = int(round(event.xdata))
cord_y = int(round(event.ydata))
coordinates_(int(round(event.xdata)), int(round(event.ydata)))
except Exception as e:
var = "not spaxel in graph"
'''
Function to set the wavelength range, with varla5 and varla6 holding the minimum and
maximum values. It only applies when varla5 < varla6, with
min(arrlambda) <= varla5 < varla6 <= max(arrlambda).
Negative values are not accepted
'''
def set_wavelength_range():
if (self.flag_file)==1:
try:
if (self.varla5).get() >= (self.varla6).get() or ((self.varla6).get() >= (self.max_value_la) and (self.varla5).get() >= (self.max_value_la)) or ((self.varla6).get() <= (self.min_value_la) and (self.varla5).get() <= (self.min_value_la)) :
MessageBox.showerror("Error!","The minimum and the maximum value should be differents and the first value minimum that the second")
(self.varla5).set(int(self.min_value_la))
(self.varla6).set(int(self.max_value_la))
(self.ax0).set_xlim(xmin=(self.min_value_la),xmax=(self.max_value_la))
(self.canvas).draw()
else:
if (self.varla5).get() >= (self.min_value_la) and (self.varla6).get() <= (self.max_value_la):
(self.ax0).set_xlim(xmin=(self.varla5).get(),xmax=(self.varla6).get())
(self.flag_wave) = 1
(self.canvas).draw()
else:
if (self.varla5).get() >= (self.min_value_la) and (self.varla6).get() > (self.max_value_la):
MessageBox.showwarning("Warning!","The maximum value is %d"%(np.amax((self.arrlambda))))
(self.ax0).set_xlim(xmin=(self.varla5).get(),xmax=(self.max_value_la))
(self.varla6).set(int(((self.ax0).get_xlim())[1]))
(self.flag_wave) = 1
(self.canvas).draw()
else:
if (self.varla5).get() < (self.min_value_la) and (self.varla6).get() <= (self.max_value_la):
MessageBox.showwarning("Warning!","The minimum value is %d"%((self.min_value_la)))
(self.ax0).set_xlim(xmin=(self.min_value_la),xmax=(self.varla6).get())
(self.varla5).set(int(((self.ax0).get_xlim())[0]))
(self.flag_wave) = 1
(self.canvas).draw()
else:
MessageBox.showwarning("Warning!","The minimum value is %d and the maximum value is %d"%((self.min_value_la),(self.max_value_la)))
(self.varla5).set(int(self.min_value_la))
(self.varla6).set(int(self.max_value_la))
(self.ax0).set_xlim(xmin=(self.min_value_la),xmax=(self.max_value_la))
(self.canvas).draw()
except Exception as e:
MessageBox.showerror("Error!","Please, enter numbers")
(self.varla5).set(int(self.min_value_la))
(self.varla6).set(int(self.max_value_la))
(self.ax0).set_xlim(xmin=(self.min_value_la),xmax=(self.max_value_la))
(self.canvas).draw()
else:
MessageBox.showerror("Error!","Please, select a file")
'''
function to reset the wavelength range to the original values, which are shown in the variables:
varla5 = minimum value of the array lambda
varla6 = maximum value of the array lambda
'''
def reset_wavelength_range():
print("reset wave")
if (self.flag_file)==1:
if (self.flag_wave) == 1:
(self.flag_wave) = 0
(self.varla5).set(int(self.min_value_la))
(self.varla6).set(int(self.max_value_la))
(self.ax0).set_xlim(xmin=(self.min_value_la),xmax=(self.max_value_la))
(self.canvas).draw()
else:
MessageBox.showerror("Error!","Please, select a file")
'''
function to set the flux range, with varla3 and varla4 holding the minimum and maximum
values. It only applies when varla3 < varla4; negative values are accepted
'''
def set_flux_range():
if (self.flag_file)==1:
try:
if (self.varla3).get() < (self.varla4).get():
(self.ax0).set_ylim(ymin=(self.varla3).get(),ymax=(self.varla4).get())
# (self.canvas).draw()
(self.flag_flux)=1
for i in (self.red_marks):
(self.ax0).axvline(int(i),(self.varla3).get(),(self.varla4).get(),color='red')
(self.canvas).draw()
else:
MessageBox.showerror("Error!","The minimum value should be minimum that the maximum value")
reset_flux_range()
except Exception as e:
print(e)
MessageBox.showerror("Error!","Please, enter numbers")
reset_flux_range()
else:
MessageBox.showerror("Error!","Please, select a file")
'''
function to reset flux range
varla3 = minimum value of the current spectrum
varla4 = maximum value of the current spectrum
'''
def reset_flux_range():
if (self.flag_file)==1:
(self.flag_flux)=0
(self.varla3).set(0)
(self.varla4).set(0)
if (self.flag_integrate_region)==1 and (self.flag_integrate_region2)==1:
(self.ax0).set_ylim(ymin=np.amin((self.Integrated_spectrum)),ymax=np.amax((self.Integrated_spectrum))*1.2)
else:
(self.ax0).set_ylim(ymin=np.amin((self.spectrum)),ymax=np.amax((self.spectrum))*1.2)
for i in (self.red_marks):
(self.ax0).axvline(int(i),(self.min_value_da),(self.max_value_da),color='red')
(self.canvas).draw()
else:
MessageBox.showerror("Error!","Please, select a file")
'''function to set the label that shows the value of the band.
It changes when the user moves the scale bar
'''
def set_bar(bar_1):
(self.varla11).set(bar_1)
'''function to set the value of the band, which is defined by the variable varla11.
When the user enters the band number directly, it also updates
the value of the scale bar
'''
def set_band():
if (self.flag_file)==1:
try:
(self.band) = (self.varla11).get()
(self.bar_).set((self.varla11).get())
# (self.varla11).set((self.varla11).get())
(self.flag_band)=1
filters_(self.name_f)
set_scaling()
except Exception as e:
(self.varla11).set((self.band))
(self.bar_).set((self.varla11).get())
MessageBox.showerror("Error!","Please, enter numbers")
else:
MessageBox.showerror("Error!","Please, select a file")
'''function to set the scaling; it affects only the image of the FITS file.
The displayed image is cleared and redrawn whenever a scaling is applied.
It depends on the display axis, the color map and the filter applied
'''
def set_scaling():
if (self.flag_file)==1:
scaling = (self.var).get()
if (self.sp1).get()==1:
cmap_1=(self.maps_array_inv)[(self.combo1).current()]
else:
cmap_1=(self.maps_array)[(self.combo1).current()]
if (self.band_sticks).get() == 0:
(self.f2).clf()
(self.ax1) = (self.f2).add_subplot(projection=(self.wcs_header), slices=('x', 'y', 2))
else:
(self.ax1).cla()
(self.ax1).set_xlabel( 'RA (arcsec)' )
(self.ax1).set_ylabel( 'DEC (arcsec)' )
(self.ax1).set_xticks((self.x_ticks))
(self.ax1).set_yticks((self.y_ticks))
(self.ax1).set_xticklabels((self.x_ticks_l))
(self.ax1).set_yticklabels((self.y_ticks_l))
if scaling == 1:
(self.saved_image)=(self.ax1).imshow((self.image_final),cmap=cmap_1,interpolation='nearest',origin='lower' )
lineal = simple_norm((self.image_final), stretch='linear')
(self.saved_image).set_norm(lineal)
else:
if scaling == 2:
total = sigma_clip((self.image_final), sigma=2)
(self.saved_image)=(self.ax1).imshow(total,cmap=cmap_1,interpolation='nearest',origin='lower' )
else:
if scaling == 3:
(self.saved_image)=(self.ax1).imshow((self.image_final),cmap=cmap_1,interpolation='nearest',origin='lower' )
asin_h = simple_norm((self.image_final), stretch='asinh')
(self.saved_image).set_norm(asin_h)
else:
if scaling == 4:
(self.saved_image)=(self.ax1).imshow((self.image_final),cmap=cmap_1,interpolation='nearest',origin='lower' )
power = 2.0
power_l = simple_norm((self.image_final), stretch='power', power=power)
(self.saved_image).set_norm(power_l)
else:
if scaling == 5:
(self.saved_image)=(self.ax1).imshow((self.image_final),cmap=cmap_1,interpolation='nearest',origin='lower')
raiz_c = simple_norm((self.image_final), stretch='sqrt')
(self.saved_image).set_norm(raiz_c)
else:
if scaling == 6:
(self.saved_image)=(self.ax1).imshow((self.image_final),cmap=cmap_1,interpolation='nearest',origin='lower' )
img_cdf, bin_centers = exposure.cumulative_distribution((self.image_final))
final = np.interp((self.image_final),bin_centers,img_cdf)
# (self.ax1).cla()
(self.saved_image)=(self.ax1).imshow(final,cmap=cmap_1,interpolation='nearest',origin='lower' )
else:
if scaling == 7:
(self.saved_image)=(self.ax1).imshow((self.image_final),cmap=cmap_1,interpolation='nearest',origin='lower' )
sigma=1
norm_img = np.zeros((self.size_x,self.size_y))
imagen_pi = cv2.normalize((self.image_final),norm_img, -math.pi, math.pi, cv2.NORM_MINMAX)
una=1/(sigma*math.sqrt(2*math.pi))
cuadrado = np.power(imagen_pi, 2)
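# Editor's note (hedged): this truncated branch appears to build a Gaussian-style
# remapping of the image after normalizing pixel values to [-pi, pi]; the remaining
# lines are not shown in this excerpt.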
import open3d as o3d
import os, sys
import argparse
import random
from tqdm import tqdm
import numpy as np
import pandas as pd
import cv2
import torch
import pickle
import time
from pathlib import Path
from omegaconf import DictConfig, OmegaConf
from torch.utils.data import Dataset, DataLoader
from ossid.models.dtoid import DtoidNet
from ossid.models.maskrcnn import MaskRCNN
from ossid.datasets import getDataloaders
from ossid.datasets.utils import collate_fn
from ossid.utils import expandBox, dict_to, to_np, move_to
from ossid.utils.bop_utils import saveResultsBop
from ossid.utils.zephyr_utils import networkInference
from ossid.config import OSSID_CKPT_ROOT, OSSID_DATA_ROOT, BOP_RESULTS_FOLDER, OSSID_RESULT_ROOT, BOP_DATASETS_ROOT, OSSID_DET_ROOT
from ossid.utils.detection import saveLmoYcbvGT, evalFinetuneResults
from zephyr.datasets.score_dataset import ScoreDataset
from zephyr.models.pointnet2 import PointNet2SSG
from zephyr.options import getOptions, checkArgs
from zephyr.utils import depth2cloud, meta2K, K2meta, projectPointsUv
from zephyr.utils.metrics import add, adi
from zephyr.utils.bop_dataset import BopDataset, BopDatasetArgs
from zephyr.utils.halcon_wrapper import PPFModel
from zephyr.utils.renderer import Renderer, blend
from zephyr.utils.icp import icpRefinement
from zephyr.constants import OBJECT_DIAMETERES
from zephyr.data_util import hypoShiftYcbv2BopBatch, modelPointsShiftYcbv2Bop, modelShiftBopYcbv
from zephyr.full_pipeline.model_featurization import FeatureModel
from zephyr.full_pipeline.scene_featurization import featurizeScene
from bop_toolkit_lib.visibility import estimate_visib_mask_gt
from bop_toolkit_lib.misc import ensure_dir, depth_im_to_dist_im_fast
import faulthandler
faulthandler.enable()
def makeFolder(folder):
if not os.path.exists(folder):
os.makedirs(folder)
def getFeaturizedModels(dataset):
from zephyr.full_pipeline.options import getOptions
parser = getOptions()
args = parser.parse_args([])
args.bop_root = dataset.bop_root
args.dataset_name = dataset.dataset_name
args.grid_dir_name = "grid"
args.sampled_model_dir_name = "model_pc"
args.grid_indices_path = os.path.join(args.bop_root, args.dataset_name, args.grid_dir_name, "verts_grid_0.npy")
dataset.dataset_camera["fx"] = dataset.dataset_camera['K'][0,0]
dataset.dataset_camera["fy"] = dataset.dataset_camera['K'][1,1]
dataset.dataset_camera["cx"] = dataset.dataset_camera['K'][0,2]
dataset.dataset_camera["cy"] = dataset.dataset_camera['K'][1,2]
featured_objects = {}
for obj_id in dataset.obj_ids:
is_sym = obj_id in dataset.sym_obj_ids
obj = FeatureModel(dataset.dataset_root, is_sym, args, create_index=True)
obj.construct(obj_id, dataset.getObjPath(obj_id), dataset.dataset_camera)
featured_objects[obj_id] = obj
return featured_objects
def main(main_args):
torch.manual_seed(42)
np.random.seed(42)
random.seed(42)
DATASET_NAME = main_args.dataset_name
DTOID_CONFIDENT_THRESHOLD = 0.5
ZEPHYR_CONFIDENT_THRESHOLD = 20
SAVE_ROOT = OSSID_RESULT_ROOT
assert not (main_args.ignore_dtoid_mask and main_args.always_dtoid_mask)
makeFolder(SAVE_ROOT)
makeFolder(BOP_RESULTS_FOLDER)
next_finetune_number = main_args.finetune_interval
'''Initialize the trained DTOID model'''
# Use the DTOID network
if main_args.dtoid_weights_path is not None:
ckpt_v = int(main_args.dtoid_weights_path.split("/")[-2].split("_")[1][1:])
ckpt_path = Path(main_args.dtoid_weights_path)
conf_path = ckpt_path.parent.parent / ("config_v%d.yaml" % ckpt_v)
elif DATASET_NAME == 'lmo':
conf_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_conf_lmo.yaml")
if main_args.use_offline_model:
ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_transductive_lmo.ckpt")
else:
ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_pretrained.ckpt")
elif DATASET_NAME == 'ycbv':
conf_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_conf_ycbv.yaml")
if main_args.use_offline_model:
ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_transductive_ycbv.ckpt")
else:
ckpt_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_pretrained.ckpt")
ossid_args = OmegaConf.load(conf_path)
    # Override arguments with user-provided directories
ossid_args.dataset.bop_root = BOP_DATASETS_ROOT
ossid_args.model.pretrained_dtoid_path = os.path.join(OSSID_CKPT_ROOT, "dtoid_pretrained_original.pth.tar")
if DATASET_NAME == 'ycbv':
ossid_args.dataset.grid_root = os.path.join(OSSID_DATA_ROOT, "templates_YCBV_BOP")
ossid_args.dataset.zephyr_result_path = os.path.join(OSSID_DATA_ROOT, "test_ycbv_boptest_zephyr_result_unseen.pkl")
elif DATASET_NAME == 'lmo':
ossid_args.dataset.grid_root = os.path.join(OSSID_DATA_ROOT, "templates_LMO_DTOID")
ossid_args.dataset.zephyr_result_path = os.path.join(OSSID_DATA_ROOT, "lmo_boptest_zephyr_result.pkl")
# Use the DTOID provided by original authors (https://github.com/jpmerc/DTOID)
# This model was trained also on YCB-V objects, and thus can only be used to evaluate on LM-O.
ossid_args.model.use_pretrained_dtoid = main_args.use_pretrained_dtoid
ossid_args.dataset.test_dataset_name = main_args.dataset_name
ossid_args.dataset.train_dataset_name = main_args.dataset_name
# Keep all the zephyr results for the training set
ossid_args.dataset.zephyr_filter_key = None
ossid_args.dataset.zephyr_results_percent = 1
# use more templates for training
ossid_args.dataset.train_local_template_sample_from = 10
if main_args.n_local_test is not None:
ossid_args.dataset.n_local_test = main_args.n_local_test
elif main_args.use_pretrained_dtoid: # If their weights are used
ossid_args.dataset.n_local_test = 160
else: # If our weights are used
ossid_args.dataset.n_local_test = 10
print("Number of local templates =", ossid_args.dataset.n_local_test)
train_loader, valid_loader, test_loader = getDataloaders(ossid_args)
# Sort the test loader
test_loader.dataset.sortTargets(reverse=main_args.backward)
ModelClass = DtoidNet
model = DtoidNet(ossid_args)
if main_args.use_pretrained_dtoid:
        # DTOID weights provided by the original authors will be loaded
print("Loading DTOID weights provided by the original authors")
pass
elif ckpt_path is not None:
print("Loading DTOID Model weights from", ckpt_path)
ckpt = torch.load(ckpt_path)
model.load_state_dict(ckpt['state_dict'])
initial_state_dict = model.state_dict()
model = model.to(0)
model = model.eval()
'''Initialize the trained Zephyr model'''
if DATASET_NAME == 'lmo':
CKPT_PATH = os.path.join(OSSID_CKPT_ROOT, "final_lmo.ckpt") # The path to the checkpoint
USE_ICP = False # Not using ICP for LMO dataset, as it only uses PPF hypotheses, which are already after ICP processing.
MODEL_DATA_TPATH = os.path.join(OSSID_DATA_ROOT, "zephyr_model_data", "lmo", "model_cloud_{:02d}.npz") # path template to the sampled point cloud
INCONST_RATIO_TH = 100
elif DATASET_NAME == 'ycbv':
if main_args.test_seen:
CKPT_PATH_FOR_ODD = os.path.join(OSSID_CKPT_ROOT, "final_ycbv.ckpt")
CKPT_PATH_FOR_EVEN = os.path.join(OSSID_CKPT_ROOT, "final_ycbv_valodd.ckpt")
else:
CKPT_PATH_FOR_ODD = os.path.join(OSSID_CKPT_ROOT, "final_ycbv_valodd.ckpt")
CKPT_PATH_FOR_EVEN = os.path.join(OSSID_CKPT_ROOT, "final_ycbv.ckpt")
        USE_ICP = True # using ICP for the YCB-V dataset
MODEL_DATA_TPATH = os.path.join(OSSID_DATA_ROOT, "zephyr_model_data", "ycbv", "model_cloud_{:02d}.npz") # path template to the sampled point cloud
INCONST_RATIO_TH = 10
'''Set up the arguments for the model'''
parser = getOptions()
zephyr_args = parser.parse_args([])
# Model-related
zephyr_args.model_name = "pn2"
zephyr_args.dataset = "HSVD_diff_uv_norm"
zephyr_args.no_valid_proj = True
zephyr_args.no_valid_depth = True
zephyr_args.inconst_ratio_th = INCONST_RATIO_TH
# Dataset-related
zephyr_args.dataset_root = [""]
zephyr_args.dataset_name = [DATASET_NAME]
# zephyr_args.resume_path = CKPT_PATH
zephyr_args.test_dataset = True
'''Initialize pytorch dataloader and model'''
# dataloader is only needed for the getPointNetData() function
# zephyr_loader = getDataloader(zephyr_args)[0]
zephyr_dataset = ScoreDataset([], "", DATASET_NAME, zephyr_args, mode='test')
zephyr_args.dim_point = zephyr_dataset.dim_point
zephyr_args.unseen_oids = []
zephyr_args.extra_bottleneck_dim = 0
if main_args.dataset_name == "ycbv":
zephyr_model = PointNet2SSG(zephyr_args.dim_point, zephyr_args, num_class=1)
zephyr_ckpt = torch.load(CKPT_PATH_FOR_ODD)
zephyr_model.load_state_dict(zephyr_ckpt['state_dict'])
zephyr_model = zephyr_model.to(0).eval()
zephyr_model_for_odd = zephyr_model
zephyr_model = PointNet2SSG(zephyr_args.dim_point, zephyr_args, num_class=1)
zephyr_ckpt = torch.load(CKPT_PATH_FOR_EVEN)
zephyr_model.load_state_dict(zephyr_ckpt['state_dict'])
zephyr_model = zephyr_model.to(0).eval()
zephyr_model_for_even = zephyr_model
else:
zephyr_model = PointNet2SSG(zephyr_args.dim_point, zephyr_args, num_class=1)
zephyr_ckpt = torch.load(CKPT_PATH)
zephyr_model.load_state_dict(zephyr_ckpt['state_dict'])
zephyr_model = zephyr_model.to(0).eval()
'''Initialize the BOP dataset'''
# Set up the options
bop_args = BopDatasetArgs(
bop_root=BOP_DATASETS_ROOT,
dataset_name=DATASET_NAME,
model_type=None,
split_name="bop_test", # This indicates we want to use the testing set defined in BOP challenge (different than original test set)
split="test",
split_type=None,
ppf_results_file=None,
skip=1, # Iterate over all test samples, with no skipping
)
bop_dataset = BopDataset(bop_args)
print("Length of the test dataset:", len(bop_dataset))
'''Load the zephyr results'''
zephyr_results = pickle.load(open(ossid_args.dataset.zephyr_result_path, 'rb'))
zephyr_results = {(r['obj_id'], r['scene_id'], r['im_id']):r for r in zephyr_results}
# Extract the training dataset from the training loader
train_dtoid_bop_dataset = train_loader.dataset
train_dtoid_bop_dataset.clearTargets()
# Recover from the training/validation split on zephyr results
train_dtoid_bop_dataset.zephyr_results = zephyr_results
'''optimizer for dtoid model'''
optimizer = torch.optim.Adam(
model.parameters(),
lr = 1e-4,
weight_decay = 1e-6,
amsgrad = True
)
'''Test the DTOID model before finetuning'''
if main_args.raw_dtoid:
print("Testing the DTOID model before finetuning")
test_results = testDtoidModel(model, test_loader)
save_path = os.path.join(SAVE_ROOT, "before_finetune_dtoid_results_%s.pkl" % main_args.exp_name)
print("Saving results to", save_path)
pickle.dump({
"test_results": test_results,
"main_args": main_args,
}, open(save_path, 'wb'))
df = pd.DataFrame.from_dict(test_results)
print("DTOID mean IoU:", df['dtoid_iou'].mean())
print("DTOID Valid IoU recall", (df['dtoid_iou'] > 0.5).astype(float).mean())
return 0
if main_args.use_sift_hypos:
# Initialize the featured model for YCB-V dataset
featured_objects = getFeaturizedModels(bop_dataset)
'''main loop'''
test_results = []
finetune_logs = []
renderers = {}
# Create the surface model (PPF training stage)
print("Creating PPF models using Halcon")
ppf_models = {}
for obj_id in bop_dataset.obj_ids:
full_model_path = bop_dataset.model_tpath.format(obj_id=obj_id)
if DATASET_NAME == 'ycbv':
ppf_models[obj_id] = PPFModel(full_model_path, ModelSamplingDist = 0.03)
else:
ppf_models[obj_id] = PPFModel(full_model_path)
# Preloading all model data
print("Preloading all model data")
model_data_all = {}
for obj_id in bop_dataset.obj_ids:
# Load the information of the model point cloud from the pre-processed dataset
model_data_path = MODEL_DATA_TPATH.format(obj_id)
model_data = np.load(model_data_path)
model_points, model_colors, model_normals = model_data['model_points'], model_data['model_colors'], model_data['model_normals']
model_data_all[obj_id] = (model_points, model_colors, model_normals)
# The batch is the data for dtoid dataset
for iteration, batch in tqdm(enumerate(test_loader), total=len(test_loader)):
obj_id, scene_id, im_id = batch['obj_id'].item(), batch['scene_id'].item(), batch['im_id'].item()
zr = zephyr_results[(obj_id, scene_id, im_id)]
# Get the full mesh model provided by LineMOD dataset
full_model_path = bop_dataset.model_tpath.format(obj_id=obj_id)
# Get the raw data from the bop dataset, preparing for zephyr inference
bop_data = bop_dataset.getDataByIds(obj_id, scene_id, im_id)
# Extract the data from the bop datapoint
img, depth, scene_camera = bop_data['img'], bop_data['depth'], bop_data['scene_camera']
scene_meta = bop_data['scene_meta']
mat_gt = bop_data['mat_gt']
        cam_K = np.asarray(scene_camera['cam_K'])
################################################################################
# Copyright (C) 2011-2015 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
import numpy as np
import matplotlib.pyplot as plt
import warnings
import time
import h5py
import datetime
import tempfile
import scipy
import logging
from bayespy.utils import misc
from bayespy.inference.vmp.nodes.node import Node
class VB():
r"""
Variational Bayesian (VB) inference engine
Parameters
----------
nodes : nodes
Nodes that form the model. Must include all at least all stochastic
nodes of the model.
tol : double, optional
Convergence criterion. Tolerance for the relative change in the VB
lower bound.
autosave_filename : string, optional
Filename for automatic saving
autosave_iterations : int, optional
Iteration interval between each automatic saving
callback : callable, optional
Function which is called after each update iteration step
"""
def __init__(self,
*nodes,
tol=1e-5,
autosave_filename=None,
autosave_iterations=0,
use_logging=False,
user_data=None,
callback=None):
self.user_data = user_data
for (ind, node) in enumerate(nodes):
if not isinstance(node, Node):
raise ValueError("Argument number %d is not a node" % (ind+1))
if use_logging:
logger = logging.getLogger(__name__)
self.print = logger.info
else:
# By default, don't use logging, just print stuff
self.print = print
# Remove duplicate nodes
self.model = misc.unique(nodes)
self.ignore_bound_checks = False
self._figures = {}
self.iter = 0
self.annealing_changed = False
self.converged = False
        self.L = np.array(())
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
IMG_DIM = 28
def tomat(vec):
img_dim = int(np.sqrt(len(vec)))
return vec.reshape((img_dim,img_dim))
def plot_1(img_vec):
"""img_mat must be single vector-representation of image to plot"""
if len(img_vec.shape) <= 1:
img_mat = tomat(img_vec)
else:
img_mat = img_vec # It was a matrix already
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.matshow(img_mat, cmap=mpl.cm.binary)
ax.axis('off')
plt.show()
def plot_100(img_vec_array):
first_100 = img_vec_array[:100]
image_mat_10x10 = np.zeros((IMG_DIM*10, IMG_DIM*10))
for x in range(10):
for y in range(10):
# Replace sub-matrix with appropriate values
image_mat_10x10[IMG_DIM*y : IMG_DIM*y+IMG_DIM,
IMG_DIM*x : IMG_DIM*x+IMG_DIM] = tomat(first_100[10*y + x])
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.matshow(image_mat_10x10, cmap=mpl.cm.binary)
ax.axis('off')
plt.show()
return fig
def sig(x):
return 1 / (1 + np.exp(-x))
def w_scale(fan_in=100, fan_out=100):
return 4*np.sqrt(6/(fan_in+fan_out))
def softmax(x):
"""Numerically stable version..."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
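# Note (added): subtracting the max keeps np.exp from overflowing, e.g.
#   softmax(np.array([1000.0, 1000.0]))  ->  array([0.5, 0.5])
# whereas np.exp(1000.0) on its own overflows to inf.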
def d_sig(x):
return sig(x)*(1-sig(x))
def binary_vec_plot(vec, title=""):
plt.matshow(np.matrix(vec), cmap=mpl.cm.binary)
plt.title(title)
def grad_vec_plot(vec, title=""):
plt.matshow(np.matrix(vec), cmap=mpl.cm.RdBu)
plt.title(title)
def train_singlelayer(epochs=200, rate=0.1, momentum=0.0, dropout=0.0, L2_reg=0.0,
train_filepath='./data/digitstrain.txt',
test_filepath='./data/digitstest.txt',
validation_filepath='./data/digitsvalid.txt',
seed=42, n_hidden=100, validation=False):
alldata_train = np.loadtxt(train_filepath, delimiter=',')
alldata_test = np.loadtxt(test_filepath, delimiter=',')
momentum = 1 - momentum # Make sure 0 = no momentum (was opposite)
np.random.seed(seed)
np.random.shuffle(alldata_train)
np.random.shuffle(alldata_test)
y_train = alldata_train[:,-1].astype(int)
digits_train = alldata_train[:,:-1]
y_test = alldata_test[:,-1].astype(int)
digits_test = alldata_test[:,:-1]
if validation==True:
alldata_valid = np.loadtxt(validation_filepath, delimiter=',')
np.random.shuffle(alldata_valid)
y_test = alldata_valid[:,-1].astype(int)
digits_test = alldata_valid[:,:-1]
outputs = []
    ce_errors_train = []  # Cross-entropy errors on the training set
ce_test_per_epoch = []
ce_errors_test = []
ce_train_per_epoch = []
class_errors_test = []
class_test_per_epoch = []
class_errors_train = []
class_train_per_epoch = []
b1 = np.zeros(n_hidden)
b2 = np.zeros(10)
W1 = np.random.uniform(w_scale(784, n_hidden), -w_scale(784, n_hidden), size=(n_hidden,784))
Wout = np.random.uniform(w_scale(n_hidden, 10), -w_scale(n_hidden, 10), size=(10,n_hidden))
PreActOut_grad = 0
b2_grad = 0
Wout_grad = 0
h1_grad = 0
PreAct1_grad = 0
b1_grad = 0
W1_grad = 0
for epoch in range(epochs):
        # Evaluate on the test data
for n in range(len(digits_test)):
###### Forward Pass
h0 = digits_test[n]
# Hidden Layer 1
PreAct1 = W1 @ h0 + b1
h1 = sig(PreAct1)
# Output Layer
PreActOut = Wout @ h1 + b2
output_test = softmax(PreActOut)
# True value
indicator_vector_test = np.zeros(10)
indicator_vector_test[y_test[n]] = 1 # Set
outputs.append(indicator_vector_test)
ce_errors_test.append(np.sum(-np.multiply(np.log(output_test),indicator_vector_test)))
if np.argmax(output_test) == y_test[n]:
class_errors_test.append(0)
else:
class_errors_test.append(1)
for n in range(len(digits_train)):
dropout_mask = np.random.binomial(1,(1-dropout), n_hidden)
###### Forward Pass
h0 = digits_train[n]
# Hidden Layer 1
PreAct1 = W1 @ h0 + b1
h1 = sig(PreAct1) * dropout_mask
# Output Layer
PreActOut = Wout @ h1 + b2
output = softmax(PreActOut)
# Backward Pass
# Output of network
# True value
indicator_vector = np.zeros(10)
indicator_vector[y_train[n]] = 1 # Set
outputs.append(indicator_vector)
ce_errors_train.append(np.sum(-np.multiply(np.log(output),indicator_vector)))
if np.argmax(output) == y_train[n]:
class_errors_train.append(0)
else:
class_errors_train.append(1)
#### Start output layer
# Resulting gradient at lowest level (inverse sign from notes)
PreActOut_grad = (momentum * (indicator_vector - output) + (1- momentum)*PreActOut_grad +
L2_reg * np.sum(W1.flatten()*W1.flatten()))
# [10]
# How much cost will change per pre-activation (at X)
# connection weight deltas
b2_grad = momentum * PreActOut_grad + (1- momentum)*b2_grad
Wout_grad = momentum * np.array(np.matrix(PreActOut_grad).T @ np.matrix(h1)) + (1- momentum)*Wout_grad # [10,100]
# Apply updates
b2 = b2 + rate*b2_grad
Wout = Wout + rate*Wout_grad
#### End Output Layer
#### Start hidden layer 1
# How much cost will change per hidden unit activation value
h1_grad = momentum * Wout.T @ PreActOut_grad + (1- momentum)* h1_grad # [100]
PreAct1_grad = momentum * np.multiply(h1_grad,d_sig(PreAct1)) + (1- momentum)*PreAct1_grad # Element-wise multiplication
b1_grad = momentum * PreAct1_grad + (1- momentum)*b1_grad
W1_grad = momentum * np.matrix(PreAct1_grad).T @ np.matrix(h0) + (1- momentum)*W1_grad
# Apply updates
b1 = b1 + rate*b1_grad
W1 = W1 + rate*np.array(W1_grad)
#### End hidden layer 1
ce_train_per_epoch.append(np.average(ce_errors_train))
ce_errors_train = []
ce_test_per_epoch.append(np.average(ce_errors_test))
ce_errors_test = []
class_test_per_epoch.append(np.average(class_errors_test))
class_errors_test = []
class_train_per_epoch.append(np.average(class_errors_train))
class_errors_train = []
result = {'output': output,
'W1': W1,
'Wout': Wout,
'ce_test_per_epoch': ce_test_per_epoch,
'ce_train_per_epoch': ce_train_per_epoch,
'class_test_per_epoch': class_test_per_epoch,
'class_train_per_epoch': class_train_per_epoch}
return result
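# Example usage (added sketch; relies on the default data paths above and the
# keys of the returned result dict):
#
#   res = train_singlelayer(epochs=50, rate=0.1, n_hidden=100)
#   plt.plot(res['ce_train_per_epoch'], label='train cross-entropy')
#   plt.plot(res['ce_test_per_epoch'], label='test cross-entropy')
#   plt.legend()
#   plt.show()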
def train_twolayer(epochs=200, rate=0.1, momentum=0.0, dropout=0,
train_filepath='./data/digitstrain.txt',
test_filepath='./data/digitstest.txt',
validation_filepath='./data/digitsvalid.txt',
seed=42, validation=False):
alldata_train = np.loadtxt(train_filepath, delimiter=',')
alldata_test = np.loadtxt(test_filepath, delimiter=',')
momentum = 1 - momentum # Make sure 0 = no momentum (was opposite)
np.random.seed(seed)
np.random.shuffle(alldata_train)
np.random.shuffle(alldata_test)
y_train = alldata_train[:,-1].astype(int)
digits_train = alldata_train[:,:-1]
y_test = alldata_test[:,-1].astype(int)
digits_test = alldata_test[:,:-1]
if validation==True:
alldata_valid = np.loadtxt(validation_filepath, delimiter=',')
np.random.shuffle(alldata_valid)
y_test = alldata_valid[:,-1].astype(int)
digits_test = alldata_valid[:,:-1]
outputs = []
    ce_errors_train = []  # Cross-entropy errors on the training set
ce_test_per_epoch = []
ce_errors_test = []
ce_train_per_epoch = []
class_errors_test = []
class_test_per_epoch = []
class_errors_train = []
class_train_per_epoch = []
b1 = np.zeros(100)
b2 = np.zeros(100)
b3 = np.zeros(10)
W1 = np.random.uniform(w_scale(784, 100), -w_scale(784, 100), size=(100,784))
W2 = np.random.uniform(w_scale(100, 100), -w_scale(100, 100), size=(100,100))
Wout = np.random.uniform(w_scale(100, 10), -w_scale(100, 10), size=(10,100))
PreActOut_grad = 0
h1_grad = 0
h2_grad = 0
PreAct1_grad = 0
PreAct2_grad = 0
b1_grad = 0
b2_grad = 0
b3_grad = 0
W1_grad = 0
W2_grad = 0
Wout_grad = 0
for epoch in range(epochs):
        # Evaluate on the test data
for n in range(len(digits_test)):
###### Forward Pass
h0 = digits_test[n]
# Hidden Layer 1
PreAct1 = W1 @ h0 + b1
h1 = sig(PreAct1)
# Hidden Layer 2
PreAct2 = W2 @ h1 + b2
h2 = sig(PreAct2)
# Output Layer
PreActOut = Wout @ h2 + b3
output_test = softmax(PreActOut)
# True value
indicator_vector_test = np.zeros(10)
indicator_vector_test[y_test[n]] = 1 # Set
outputs.append(indicator_vector_test)
ce_errors_test.append(np.sum(-np.multiply(np.log(output_test),indicator_vector_test)))
if np.argmax(output_test) == y_test[n]:
class_errors_test.append(0)
else:
class_errors_test.append(1)
for n in range(len(digits_train)):
###### Forward Pass
h0 = digits_train[n]
# Hidden Layer 1
PreAct1 = W1 @ h0 + b1
h1 = sig(PreAct1)
# Hidden Layer 2
PreAct2 = W2 @ h1 + b2
h2 = sig(PreAct2)
# Output Layer
PreActOut = Wout @ h2 + b3
output = softmax(PreActOut)
# Backward Pass
# Output of network
# True value
indicator_vector = np.zeros(10)
indicator_vector[y_train[n]] = 1 # Set
outputs.append(indicator_vector)
ce_errors_train.append(np.sum(-np.multiply(np.log(output),indicator_vector)))
if np.argmax(output) == y_train[n]:
class_errors_train.append(0)
else:
class_errors_train.append(1)
#### Start output layer
# Resulting gradient at lowest level (inverse sign from notes)
PreActOut_grad = momentum * (indicator_vector - output) + (1- momentum)*PreActOut_grad
# [10]
# How much cost will change per pre-activation (at X)
# connection weight deltas
b3_grad = momentum * PreActOut_grad + (1- momentum)*b3_grad
            Wout_grad = momentum * np.array(np.matrix(PreActOut_grad).T @ np.matrix(h2)) + (1- momentum)*Wout_grad  # [10,100]; the output layer takes h2 as input
import numpy as np
import kmeans
import common
import naive_em
import em
# Read toy data
X = np.loadtxt("toy_data.txt")
X_netflix_incomplete = np.loadtxt("netflix_incomplete.txt")
X_netflix_complete = np.loadtxt("netflix_complete.txt")
# Parameters
K = 1
seed = 1
# Testing the Kmeans along K (number of clusters) and seed (random init)
########################################################################
# Initialize min cost per K array
min_cost = np.ones(4)*np.inf
min_cost_seed = np.zeros(4)
import sys
import numpy as np
from gym import spaces
import gym
def PAdam(A, b, x_start, length, bound=10, step_size=1.0):
obj_vals = []
def Obj_func(x):
return np.dot(np.dot(A, x), x) + np.dot(x, b)
def Grad_eval(x):
return np.dot(A + A.transpose(), x).flatten() + b
x = x_start
time_since_grad_reset = 0
beta_1 = 0.9
beta_2 = 0.999
grad_mean = 0
grad_var = 0
for i in range(length):
obj_vals.append(Obj_func(x))
epsilon = 1e-8
current_grad = Grad_eval(x)
grad_mean = beta_1 * grad_mean + (1.0 - beta_1) * current_grad
grad_var = beta_2 * grad_var + (1.0 - beta_2) * np.square(current_grad)
time_since_grad_reset += 1
t = time_since_grad_reset # t is really t+1 here
mean_hat = grad_mean / (1 - beta_1 ** t)
var_hat = grad_var / (1 - beta_2 ** t)
step_size = 1.0
x_action_delta = step_size * np.divide(mean_hat, np.sqrt(var_hat) + epsilon)
# The clipping operation could cause issues with adam from a motivational stand point
x = np.clip(x - x_action_delta, -bound, bound)
return obj_vals
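# Example usage (added sketch): running the standalone PAdam helper on a random
# 5-dimensional quadratic; all values below are illustrative.
#
#   A = np.random.uniform(-1, 1, size=(5, 5))
#   b = np.random.uniform(-1, 1, size=5)
#   x0 = np.random.uniform(-10, 10, size=5)
#   history = PAdam(A, b, x0, length=100, bound=10)
#   print(history[-1])  # objective value after 100 projected Adam steps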
class SwitchingquadraticEnv(gym.Env):
"""
This is enviroment is for a random quadratic problem not necessarly convex
with a switching based optimizer. Our agent choses between a random new point, SGD, or adam.
"""
def Obj_func(self, x):
A = self.A
b = self.b
return np.dot(np.dot(A, x), x) + np.dot(x, b)
def Grad_eval(self, x):
A = self.A
b = self.b
return np.dot(A + A.transpose(), x).flatten() + b
def __init__(self, config):
super(SwitchingquadraticEnv, self).__init__()
self.dim_list = config["dim"]
self.dim = np.random.choice(self.dim_list) # size of matrix dim* dim
# size of our search cube (-bound, bound)^dim
self.bound = config["bound"]
self.num_steps = config["num_steps"]
self.switching_style = config["switching_style"]
"""
Current state is (-bound, bound)^dim
objective value deltas are (-inf, inf)^(h_len,1)
"""
bound = self.bound
dim = self.dim
# More complicated matrix generation
random_m = np.random.uniform(-1, 1, size=(self.dim, self.dim))
# More complicated matrix generation
# old code
if config["old_A_style"]:
self.A = 0.5 * (random_m + np.transpose(random_m))
else:
self.A = np.tril(random_m) + np.tril(random_m, k=-1).T
self.b = np.random.uniform(-1, 1, size=self.dim)
self.x = np.random.uniform(low=-bound, high=bound, size=dim)
"""
Adding memory depth
"""
self.memory_depth = config["Memory_depth"]
self.observation_space = spaces.Box(
low=-np.inf, high=np.inf, shape=(9 + self.memory_depth,), dtype=np.float32
)
"""
Action space should be trinary choice
"""
if self.switching_style == "All":
self.action_space = spaces.Discrete(3)
elif self.switching_style == "RandAdam":
self.action_space = spaces.Discrete(2)
elif self.switching_style == "RandGD":
self.action_space = spaces.Discrete(2)
elif self.switching_style == "AdamGD":
self.action_space = spaces.Discrete(2)
else:
raise ValueError(f"'{self.switching_style}' is not a valid swithing style")
# All of our mutable state
self.x = np.random.uniform(low=-bound, high=bound, size=dim)
self.best_val = self.Obj_func(self.x)
self.time_since_grad_reset = 0
self.step_count = 0
self.grad_mean = self.Grad_eval(self.x)
self.total_grad_norm = np.linalg.norm(self.Grad_eval(self.x))
self.grad_var = np.zeros(shape=(dim,), dtype=np.float32)
temp_padam_val = min(
PAdam(
self.A, self.b, self.x, self.num_steps, bound=self.bound, step_size=1.0,
)
)
if (
temp_padam_val < self.Obj_func(self.x)
and abs(temp_padam_val - self.Obj_func(self.x)) > 1.0
):
self.padam_reward_normalize = abs(temp_padam_val - self.Obj_func(self.x))
else:
self.padam_reward_normalize = 1.0
def reset(self):
self.dim = np.random.choice(self.dim_list)
random_m = np.random.uniform(-1, 1, size=(self.dim, self.dim))
self.A = 0.5 * (random_m + np.transpose(random_m))
self.b = np.random.uniform(-1, 1, size=self.dim)
self.x = np.random.uniform(low=-self.bound, high=self.bound, size=self.dim)
self.best_val = self.Obj_func(self.x)
self.total_grad_norm = np.linalg.norm(self.Grad_eval(self.x))
self.grad_mean = self.Grad_eval(self.x)
self.grad_var = np.zeros(shape=(self.dim,), dtype=np.float32)
self.time_since_grad_reset = 0
self.step_count = 0
temp_padam_val = min(
PAdam(
self.A, self.b, self.x, self.num_steps, bound=self.bound, step_size=1.0,
)
)
if (
temp_padam_val < self.Obj_func(self.x)
and abs(temp_padam_val - self.Obj_func(self.x)) > 1.0
):
self.padam_reward_normalize = abs(temp_padam_val - self.Obj_func(self.x))
else:
self.padam_reward_normalize = 1.0
return_list = []
return_list.append(np.linalg.norm(self.Grad_eval(self.x))) # Grad norm
return_list.append(self.Obj_func(self.x)) # current value
return_list.append(self.best_val) # best value
return_list.append(self.step_count)
return_list.append(self.dim)
# new observation information
return_list.append(np.linalg.norm(self.A))
        return_list.append(np.linalg.norm(self.b))
# Question 1: Create a 4X2 integer array and print its attributes
# Note: The element must be a type of unsigned int16. And print the following Attributes: –
#
# The shape of an array.
# Array dimensions.
# The Length of each element of the array in bytes.
import numpy as np
Array = np.empty([4, 2], dtype=np.uint16)
print("Array Shape is: ", Array.shape)
print("Array dimensions are ", Array.ndim)
print("Length of each element of array in bytes is ", Array.itemsize)
# Question 2: Create a 5X2 integer array from a range between 100 and 200 such that the difference between each element is 10
import numpy as np
a = np.arange(100, 200, 10)
b = a.reshape(5, 2)
print(b)
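# Expected output (added for reference):
# [[100 110]
#  [120 130]
#  [140 150]
#  [160 170]
#  [180 190]]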
# Question 3: Following is the provided numPy array. return array of items in the third column from all rows
import numpy as np
sampleArray = np.array([[11, 22, 33], [44, 55, 66], [77, 88, 99]])
newArray = sampleArray[..., 2]
print(newArray)
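# Expected output (added for reference): [33 66 99]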
# Question 4: Following is the given numpy array return array of odd rows and even columns
import numpy
sampleArray = numpy.array([[3, 6, 9, 12], [15, 18, 21, 24],
[27, 30, 33, 36], [39, 42, 45, 48], [51, 54, 57, 60]])
print("Printing Input Array")
print(sampleArray)
print("\n Printing array of odd rows and even columns")
newArray = sampleArray[::2, 1::2]
print(newArray)
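# Expected output of the odd-rows / even-columns selection (added for reference):
# [[ 6 12]
#  [30 36]
#  [54 60]]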
# Question 5: Add the following two NumPy arrays and Modify a result array by calculating the square of each element
# import numpy
#
# arrayOne = numpy.array([[5, 6, 9], [21 ,18, 27]])
# arrayTwo = numpy.array([[15 ,33, 24], [4 ,7, 1]])
import numpy
arrayOne = numpy.array([[5, 6, 9], [21, 18, 27]])
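# Completion (added sketch; values taken from the commented block above):
arrayTwo = numpy.array([[15, 33, 24], [4, 7, 1]])
resultArray = arrayOne + arrayTwo
resultArray = resultArray ** 2
print(resultArray)
# Expected output:
# [[ 400 1521 1089]
#  [ 625  625  784]]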
import os
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
from losses import focal_loss,weighted_binary_crossentropy
from utils import Dataset
class DeepFM(object):
def __init__(self, params):
self.feature_size = params['feature_size']
self.field_size = params['field_size']
self.embedding_size = params['embedding_size']
self.deep_layers = params['deep_layers']
self.l2_reg_coef = params['l2_reg']
self.learning_rate = params['learning_rate']
self.pos_ratio = params['pos_ratio']
self.keep_prob_v = params['keep_prob']
self.activate = tf.nn.relu
self.weight = {}
self.saver=None
self.checkpoint_dir = params['checkpoint_dir']
self.build()
def build(self):
"""
feature_size: N
field_size: F
embedding_size: K
batch_size: None
"""
self.feat_index = tf.placeholder(tf.int32, shape=[None, None], name='feature_index')
self.feat_value = tf.placeholder(tf.float32, shape=[None, None], name='feature_value')
self.label = tf.placeholder(tf.float32, shape=[None,1], name='label')
self.keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob') # scaler
self.is_training= tf.placeholder(tf.bool, shape=[],name='is_training')
        # 1. ------------------------- Define weights -----------------------------------------
        # Weights of the first-order terms in the FM part
self.weight['first_order'] = tf.Variable(tf.random_normal([self.feature_size, 1], 0.0, 0.05), # N * 1
name='first_order')
        # Weights between the one-hot encoded input layer and the dense-embeddings layer, i.e. the DNN input embedding.
self.weight['embedding_weight'] = tf.Variable(tf.random_normal([self.feature_size, self.embedding_size], 0.0, 0.05), # N*K
name='embedding_weight')
        # Weights and biases of the deep part; the initial input dimension of the deep network is input_size = F*K
num_layer = len(self.deep_layers)
input_size = self.field_size * self.embedding_size
# glorot_normal = np.sqrt(2.0 / (input_size + self.deep_layers[0])) # for sigmoid
he_normal = np.sqrt(2.0 /input_size) # for relu
self.weight['layer_0'] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(input_size, self.deep_layers[0])), dtype=np.float32)
self.weight['bias_0'] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(1, self.deep_layers[0])), dtype=np.float32)
        # Create the weight and bias of every layer in the deep network
for i in range(1, num_layer):
he_normal = np.sqrt(2.0 / (self.deep_layers[i - 1]))
self.weight['layer_' + str(i)] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(self.deep_layers[i - 1], self.deep_layers[i])),
dtype=np.float32)
self.weight['bias_' + str(i)] = tf.Variable(np.random.normal(loc=0, scale=he_normal, size=(1, self.deep_layers[i])),dtype=np.float32)
        # deep-part output_size + first-order output_size + second-order output_size
last_layer_size = self.deep_layers[-1] + self.field_size + self.embedding_size
glorot_normal = np.sqrt(2.0 / (last_layer_size + 1))
        # Create the weight and bias of the last layer
self.weight['last_layer'] = tf.Variable(np.random.normal(loc=0, scale=glorot_normal, size=(last_layer_size, 1)), dtype=np.float32)
self.weight['last_bias'] = tf.Variable(tf.constant(0.0), dtype=np.float32)
        # 2. ---------------------- Forward pass ------------------------------------
# None*F*K
self.embedding_index = tf.nn.embedding_lookup(self.weight['embedding_weight'],self.feat_index)
# [None*F*K] .*[None*F*1] = None*F*K
self.embedding_part = tf.multiply(self.embedding_index, tf.reshape(self.feat_value, [-1, self.field_size, 1]))
        # First-order features of the FM part
# None * F*1
self.embedding_first = tf.nn.embedding_lookup(self.weight['first_order'],
self.feat_index)
#[None*F*1].*[None*F*1] = None*F*1
self.embedding_first = tf.multiply(self.embedding_first, tf.reshape(self.feat_value, [-1, self.field_size, 1]))
# None*F
self.first_order = tf.reduce_sum(self.embedding_first, 2)
        # Second-order features, None*K
self.sum_second_order = tf.reduce_sum(self.embedding_part, 1)
self.sum_second_order_square = tf.square(self.sum_second_order)
self.square_second_order = tf.square(self.embedding_part)
self.square_second_order_sum = tf.reduce_sum(self.square_second_order, 1)
# 1/2*((a+b)^2 - a^2 - b^2)=ab
# None*K
self.second_order = 0.5 * tf.subtract(self.sum_second_order_square, self.square_second_order_sum)
        # Output of the FM part, None*(F+K)
self.fm_part = tf.concat([self.first_order, self.second_order], axis=1)
        # DNN part
# None*(F*K)
self.deep_embedding = tf.reshape(self.embedding_part, [-1, self.field_size * self.embedding_size])
        # Fully connected layers
for i in range(0, len(self.deep_layers)):
self.deep_embedding = tf.add(tf.matmul(self.deep_embedding, self.weight["layer_%d" % i]),
self.weight["bias_%d" % i])
# self.deep_embedding =tf.matmul(self.deep_embedding, self.weight["layer_%d" % i])
self.bn_out = tf.layers.batch_normalization(self.deep_embedding, training=self.is_training)
# self.bn_out = tf.layers.dropout(self.deep_embedding, rate=self.keep_prob,training=self.is_training)
self.deep_embedding = self.activate(self.bn_out)
self.deep_embedding = tf.layers.dropout(self.deep_embedding, rate =1.0-self.keep_prob, training= self.is_training)
        # Concatenate the FM output with the DNN output, None*(F+K+layer[-1])
din_all = tf.concat([self.fm_part, self.deep_embedding], axis=1)
#None*1
self.out = tf.add(tf.matmul(din_all, self.weight['last_layer']), self.weight['last_bias'])
        # 3. ------------------ Define the loss ---------------------------------------
        # loss part, None*1
self.prob = tf.nn.sigmoid(self.out)
# self.entropy_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels= self.label, logits= self.out))
# self.entropy_loss = -tf.reduce_mean(
# self.label * tf.log(tf.clip_by_value(self.prob, 1e-10, 1.0))+ (1 - self.label)* tf.log(tf.clip_by_value(1-self.prob,1e-10,1.0)))
self.entropy_loss = focal_loss(self.prob, self.label, alpha=0.5, gamma=2)
# self.entropy_loss = weighted_binary_crossentropy(self.prob, self.label, pos_ratio=self.pos_ratio)
        # Regularization: sum(w^2)/2*l2_reg_coef
self.reg_loss = tf.contrib.layers.l2_regularizer(self.l2_reg_coef)(self.weight["last_layer"])
for i in range(len(self.deep_layers)):
self.reg_loss += tf.contrib.layers.l2_regularizer(self.l2_reg_coef)(self.weight["layer_%d" % i])
# tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(self.l2_reg_coef)(self.weight['layer_1']))
# print(self.entropy_loss.shape.as_list(), self.reg_loss.shape.as_list())
self.loss = self.entropy_loss + self.reg_loss
self.global_step = tf.Variable(0, trainable=False, name='global_step')
self.learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step,3000, 0.99,staircase=False)
opt = tf.train.AdamOptimizer(self.learning_rate)
# opt = tf.train.GradientDescentOptimizer(self.learning_rate)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
trainable_params = tf.trainable_variables()
gradients = tf.gradients(self.loss, trainable_params)
clip_gradients, _ = tf.clip_by_global_norm(gradients, 5)
with tf.control_dependencies(update_ops):
# self.train_op = opt.minimize(self.loss, global_step = self.global_step)
self.train_op = opt.apply_gradients(zip(clip_gradients, trainable_params), global_step=self.global_step)
self.saver = tf.train.Saver(max_to_keep=3)
def train(self, sess, feat_index, feat_value, label):
_, step = sess.run([self.train_op, self.global_step], feed_dict={
self.feat_index: feat_index,
self.feat_value: feat_value,
self.label: label,
self.keep_prob: self.keep_prob_v,
self.is_training:True})
return step
def predict(self, sess, feat_index, feat_value, batch_size=None):
if batch_size is None:
prob = sess.run([self.prob], feed_dict={
self.feat_index: feat_index,
self.feat_value: feat_value,
self.keep_prob: 1,
self.is_training:False})[0]
else:
data =Dataset(feat_value, feat_index, [None]*len(feat_index), batch_size, shuffle=False)
probs =[]
for feat_index, feat_value, _ in data:
prob = sess.run([self.prob], feed_dict={
self.feat_index: feat_index,
self.feat_value: feat_value,
self.keep_prob: 1,
self.is_training:False})[0]
probs.append(prob.ravel())
            prob = np.concatenate(probs)
        return prob
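# Example usage (added sketch): the parameter names follow __init__ above; the
# feature index/value arrays, labels and checkpoint directory are placeholders.
#
#   params = {
#       'feature_size': 10000, 'field_size': 39, 'embedding_size': 8,
#       'deep_layers': [256, 128], 'l2_reg': 1e-4, 'learning_rate': 1e-3,
#       'pos_ratio': 0.1, 'keep_prob': 0.8, 'checkpoint_dir': './ckpt',
#   }
#   model = DeepFM(params)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       step = model.train(sess, feat_index, feat_value, label)
#       probs = model.predict(sess, feat_index, feat_value)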
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @file gd_algorithm.py
# @brief
# @author QRS
# @blog qrsforever.github.io
# @version 1.0
# @date 2019-09-23 11:25:05
################################ jupyter-vim #######################################
# https://github.com/qrsforever/vim/blob/master/bundle/.configs/jupyter-vim_conf.vim
# %pylab --no-import-all # noqa
#####################################################################################
import numpy as np
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
np.random.seed(678)
#####################################################################################
# <codecell> activity function and derivative function
#####################################################################################
def sigmoid(x):
return 1 / (1 + np.exp(-1 * x))
def d_sigmoid(x):
return sigmoid(x) * (1 - sigmoid(x))
##
def tanh(x):
return np.tanh(x)
def d_tanh(x):
return 1 - np.tanh(x) ** 2
##
def relu(x):
mask = (x > 0.0) * 1.0
return x * mask
def d_relu(x):
mask = (x > 0.0) * 1.0
return mask
##
def elu(matrix):
mask = (matrix <= 0) * 1.0
less_zero = matrix * mask
safe = (matrix > 0) * 1.0
greater_zero = matrix * safe
    final = 3.0 * (np.exp(less_zero) - 1) * mask   # match d_elu below: 3*(exp(x)-1) on the negative part
return greater_zero + final
def d_elu(matrix):
safe = (matrix > 0) * 1.0
mask2 = (matrix <= 0) * 1.0
temp = matrix * mask2
final = (3.0 * np.exp(temp))*mask2
return (matrix * safe) + final
#####################################################################################
# <codecell> train data
#####################################################################################
mnist = input_data.read_data_sets("/home/lidong/Datasets/ML/mnist", one_hot=False)
train = mnist.test
images, labels = train.images, train.labels
images.shape, labels.shape, labels[0:5]
## select 0,1 labels and images
zero_index, one_index = np.where(labels == 0)[0], np.where(labels == 1)[0]
zero_image, one_image = images[[zero_index]], images[[one_index]]
zero_label, one_label = np.expand_dims(labels[[zero_index]], axis=1), np.expand_dims(labels[[one_index]], axis=1)
zero_image.shape, one_image.shape, zero_label.shape, one_label.shape
## meld 0, 1 labels and images
images_org = np.vstack((zero_image, one_image))
labels_org = np.vstack((zero_label, one_label))
images_org.shape, labels_org.shape, labels_org[2:5], labels[2:5]
## shuffle method 1: sklearn.utils.shuffle
images, labels = shuffle(images_org, labels_org)
images.shape, labels.shape
## shuffle method 2: np.random.shuffle
# images_labels = np.hstack((images_org, labels_org))
# np.random.shuffle(images_labels)
# images, labels = images_labels[:, 0:-1], np.expand_dims(images_labels[:, -1], axis=1)
# images.shape, labels.shape
## train / test data
train_num, test_num = 50, 20
train_images, train_labels = images[0:train_num, :], labels[0:train_num, :]
test_images, test_labels = images[-test_num-1:-1, :], labels[-test_num-1:-1, :]
train_images.shape, test_images.shape
#####################################################################################
# <codecell> Graph
#####################################################################################
#
# *****
# * x * elu
# ***** ***** l1A
# * * tanh
# ***** ***** l2A
# ***** * *
# * * ***** ***** sigmoid
# ***** * * ***** l3A
# --------> ***** --------> --------> * *
# . . *****
# w1:784x256 . w2:256x128 . w3:128x1
# . . .
# .
# . *****
# ***** * *
# * * *****
# ***** *****
# * *
# *****
# 1x784 1x256 1x128 1x1
# input layer-1 layer-2 layer-3
#
# Loss function:
#
# (sigmoid(w3 * tanh(w2 * elu(w1 * x))) - label)^2 * 0.2
# | | | | | ------ x d(w1)
# | | | | | l1
# | | | | +-------- d_elu(l1) d(l1)
# | | | | l1A
# | | | +-------------- l1A d(w2)
# | | | l2
# | | +------------------ d_tanh(l2) d(l2)
# | | l2A
# | +------------------------ l2A d_sigmoid(l3) (l3A - label) d(w3) |
# | l3 |
# +------------------------------ d_sigmoid(l3) (l3A - label) d(l3) |w3
# l3A |
# --------------------------------------------------- (l3A - label) d(l3A)|
# cost
#
#
# Only the digits 0 and 1 are selected from the 0-9 images, so the model is simplified:
# fully connected layers with a sigmoid on the last layer instead of a softmax.
# Matrix differentiation is the tricky part and needs some basic understanding,
# otherwise the code is hard to follow (when to transpose, when to take a dot product, etc.).
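#####################################################################################
# <codecell> added sanity check: finite-difference gradient check
#####################################################################################
# Added sketch (not part of the original script): compare the analytic gradient of
# the 0.5*(sigmoid(...) - label)^2 loss w.r.t. w3 against a central finite-difference
# estimate on a tiny random elu -> tanh -> sigmoid chain. Sizes and seed are illustrative.
def _grad_check_w3(eps=1e-5):
    rng = np.random.RandomState(0)
    x = rng.randn(1, 8)                  # fake "image"
    label = np.array([[1.0]])
    w1 = rng.randn(8, 5) * 0.2
    w2 = rng.randn(5, 4) * 0.2
    w3 = rng.randn(4, 1) * 0.2

    def loss(w3_):
        l2A = tanh(elu(x.dot(w1)).dot(w2))
        l3A = sig(l2A.dot(w3_))
        return np.square(l3A - label).sum() * 0.5

    # analytic gradient: dL/dw3 = l2A^T @ ((l3A - label) * sigmoid'(l3))
    l2A = tanh(elu(x.dot(w1)).dot(w2))
    l3 = l2A.dot(w3)
    l3A = sig(l3)
    g3 = l2A.T.dot((l3A - label) * d_sig(l3))

    # numerical gradient by central differences
    g3_num = np.zeros_like(w3)
    for i in range(w3.shape[0]):
        for j in range(w3.shape[1]):
            w_plus, w_minus = w3.copy(), w3.copy()
            w_plus[i, j] += eps
            w_minus[i, j] -= eps
            g3_num[i, j] = (loss(w_plus) - loss(w_minus)) / (2 * eps)
    print("w3 gradient check, max abs difference:", np.abs(g3 - g3_num).max())

_grad_check_w3()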
#####################################################################################
# <codecell> Global param
#####################################################################################
## weight
_w1 = np.random.randn(784, 256) * 0.2
_w2 = np.random.randn(256, 128) * 0.2
_w3 = np.random.randn(128, 1) * 0.2
## hyper parameters
learn_rate = 0.0003
num_epoch = 100
cost_array = {}
#####################################################################################
# <codecell> SGD
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
# layer1
l1 = image.dot(w1)
l1A = elu(l1)
# layer2
l2 = l1A.dot(w2)
l2A = tanh(l2)
# layer3
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
# loss
total_cost += np.square(l3A - label).sum() * 0.5
# eval gradient
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32) # 128x1
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22) # 256x128
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12) # 784x256
# update weight
w3 = w3 - learn_rate * g3
w2 = w2 - learn_rate * g2
w1 = w1 - learn_rate * g1
if iter % 10 == 0:
print("SGD current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['sgd'] = cost_temp_array
#####################################################################################
# <codecell> Momentum
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
v1, v2, v3 = 0, 0, 0
alpha = 0.001
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
v3 = v3 * alpha + learn_rate * g3
v2 = v2 * alpha + learn_rate * g2
v1 = v1 * alpha + learn_rate * g1
w3 = w3 - v3
w2 = w2 - v2
w1 = w1 - v1
if iter % 10 == 0:
print("Momentum current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Momentum'] = cost_temp_array
#####################################################################################
# <codecell> NAG: Nesterov accelerated gradient
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
v1, v2, v3 = 0, 0, 0
alpha = 0.001
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
        # Look-ahead: apply the momentum step first, then evaluate the gradient at the anticipated next position
fake_w3 = w3 - alpha * v3
fake_w2 = w2 - alpha * v2
fake_w1 = w1 - alpha * v1
l1 = image.dot(fake_w1)
l1A = elu(l1)
l2 = l1A.dot(fake_w2)
l2A = tanh(l2)
l3 = l2A.dot(fake_w3)
l3A = sigmoid(l3)
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3_fake = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(fake_w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2_fake = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(fake_w2.T)
g12 = d_elu(l1)
g13 = image
g1_fake = g13.T.dot(g11 * g12)
v3 = v3 * alpha + learn_rate * g3_fake
v2 = v2 * alpha + learn_rate * g2_fake
v1 = v1 * alpha + learn_rate * g1_fake
w3 = w3 - v3
w2 = w2 - v2
w1 = w1 - v1
if iter % 10 == 0:
print("Nesterov accelerated gradient current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['NAG'] = cost_temp_array
#####################################################################################
# <codecell> Adagrad
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
vlr_1, vlr_2, vlr_3 = 0, 0, 0
epsilon = 0.00000001
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
        # Accumulate the squared gradients (adaptive per-parameter learning rate)
vlr_3 = vlr_3 + g3 ** 2 # 128x1
vlr_2 = vlr_2 + g2 ** 2 # 256x128
vlr_1 = vlr_1 + g1 ** 2 # 784x256
w3 = w3 - (learn_rate / np.sqrt(vlr_3 + epsilon)) * g3
w2 = w2 - (learn_rate / np.sqrt(vlr_2 + epsilon)) * g2
w1 = w1 - (learn_rate / np.sqrt(vlr_1 + epsilon)) * g1
if iter % 10 == 0:
print("Adagrad current Iter: ", iter, " Total Cost: ", total_cost)
cost_temp_array.append(total_cost)
cost_array['Adagrad'] = cost_temp_array
#####################################################################################
# <codecell> Adadelta
#####################################################################################
w1, w2, w3 = _w1, _w2, _w3
epsilon, gamma = 0.000001, 0.001
vlr_1, vlr_2, vlr_3 = 0, 0, 0
wlr_1, wlr_2, wlr_3 = 0, 0, 0
cost_temp_array = []
for iter in range(num_epoch):
total_cost = 0
for index in range(len(train_images)):
image = np.expand_dims(train_images[index], axis=0)
label = np.expand_dims(train_labels[index], axis=1)
l1 = image.dot(w1)
l1A = elu(l1)
l2 = l1A.dot(w2)
l2A = tanh(l2)
l3 = l2A.dot(w3)
l3A = sigmoid(l3)
total_cost += np.square(l3A - label).sum() * 0.5
g31 = l3A - label
g32 = d_sigmoid(l3)
g33 = l2A
g3 = g33.T.dot(g31 * g32)
g21 = (g31 * g32).dot(w3.T)
g22 = d_tanh(l2)
g23 = l1A
g2 = g23.T.dot(g21 * g22)
g11 = (g21 * g22).dot(w2.T)
g12 = d_elu(l1)
g13 = image
g1 = g13.T.dot(g11 * g12)
        # Decaying average of the squared gradients
vlr_3 = gamma * vlr_3 + (1-gamma) * g3 ** 2
vlr_2 = gamma * vlr_2 + (1-gamma) * g2 ** 2
vlr_1 = gamma * vlr_1 + (1-gamma) * g1 ** 2
delta_3 = - (np.sqrt(wlr_3 + epsilon) / np.sqrt(vlr_3 + epsilon)) * g3
        delta_2 = - (np.sqrt(wlr_2 + epsilon) / np.sqrt(vlr_2 + epsilon)) * g2
'''
Implement a Poisson 2D problem on a cracked domain with pure Dirichlet boundary conditions:
- \Delta u(x,y) = f(x,y) for (x,y) \in \Omega:= (-1,1)x(-1,1) \ (0,1)x{0}
u(r,\theta) = r^(1/2)*sin(\theta/2), for (r,\theta) \in \partial \Omega (with polar coordinates)
f(x,y) = 0
Problem from: <NAME> and <NAME> - The Deep Ritz method: A deep learning-based
numerical algorithm for solving variational problems, Section 3.1
Use adaptivity
'''
import tensorflow as tf
import numpy as np
from utils.PoissonEqAdapt import PoissonEquationColl
from utils.Geometry import QuadrilateralGeom
#make figures bigger on HiDPI monitors
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 200
import matplotlib.pyplot as plt
import time
print("Initializing domain...")
tf.reset_default_graph() # To clear the defined variables and operations of the previous cell
np.random.seed(1234)
tf.set_random_seed(1234)
#problem parameters
alpha = 0
#model paramaters
layers = [2, 30, 30, 30, 30, 1] #number of neurons in each layer
num_train_its = 10000 #number of training iterations
data_type = tf.float32
pen_dir = 500
pen_neu = 0
numIter = 3
numBndPts = 81
numIntPtsX = 21
numIntPtsY = 21
#generate points
domainSECorners = np.array([[0,-1],[1,-1],[1,0],[0,0]])
domainNECorners = np.array([[0,0],[1,0],[1,1],[0,1]])
domainWCorners = np.array([[-1,-1],[0,-1],[0,1],[-1,1]])
domainSEGeom = QuadrilateralGeom(domainSECorners)
domainNEGeom = QuadrilateralGeom(domainNECorners)
domainWGeom = QuadrilateralGeom(domainWCorners)
dirichlet_bottom_e_x, dirichlet_bottom_e_y, _, _ = domainSEGeom.getBottomPts(numBndPts)
dirichlet_right_s_x, dirichlet_right_s_y, _, _ = domainSEGeom.getRightPts(numBndPts)
dirichlet_crack_x, dirichlet_crack_y, _, _ = domainNEGeom.getBottomPts(numBndPts)
dirichlet_right_n_x, dirichlet_right_n_y, _, _ = domainNEGeom.getRightPts(numBndPts)
dirichlet_top_e_x, dirichlet_top_e_y, _, _, = domainNEGeom.getTopPts(numBndPts)
dirichlet_bottom_w_x, dirichlet_bottom_w_y, _, _ = domainWGeom.getBottomPts(numBndPts)
dirichlet_top_w_x, dirichlet_top_w_y, _, _ = domainWGeom.getTopPts(numBndPts)
dirichlet_left_x, dirichlet_left_y, _, _ = domainWGeom.getLeftPts(2*numBndPts)
interior_se_x, interior_se_y = domainSEGeom.getUnifIntPts(numIntPtsX, numIntPtsY, [0,0,0,1])
interior_ne_x, interior_ne_y = domainNEGeom.getUnifIntPts(numIntPtsX, numIntPtsY, [0,0,0,1])
interior_w_x, interior_w_y = domainWGeom.getUnifIntPts(numIntPtsX, 2*numIntPtsY, [0,0,0,0])
def compExSol(x,y):
r = np.sqrt(x**2+y**2)
t = np.arctan2(y,x)
t = np.where(t<0, t+2*np.pi, t)
u = np.sqrt(r)*np.sin(t/2)
return u
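# Quick check (added sketch, not part of the original script): u = r^(1/2)*sin(theta/2)
# is harmonic away from the crack, so a central finite-difference Laplacian of
# compExSol at a point off the positive x-axis should be close to zero. The sample
# point and step size below are arbitrary.
def _checkExSolHarmonic(x0=-0.4, y0=0.3, h=1e-3):
    u = lambda xx, yy: float(compExSol(np.array(xx), np.array(yy)))
    lap = (u(x0+h, y0) + u(x0-h, y0) + u(x0, y0+h) + u(x0, y0-h) - 4.0*u(x0, y0))/h**2
    print('FD Laplacian of the exact solution at (%.2f, %.2f): %.3e' % (x0, y0, lap))
_checkExSolHarmonic()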
#generate boundary values
dirichlet_bottom_e_u = compExSol(dirichlet_bottom_e_x, dirichlet_bottom_e_y)
dirichlet_right_s_u = compExSol(dirichlet_right_s_x, dirichlet_right_s_y)
dirichlet_crack_u = compExSol(dirichlet_crack_x, dirichlet_crack_y)
dirichlet_right_n_u = compExSol(dirichlet_right_n_x, dirichlet_right_n_y)
dirichlet_top_e_u = compExSol(dirichlet_top_e_x, dirichlet_top_e_y)
dirichlet_bottom_w_u = compExSol(dirichlet_bottom_w_x, dirichlet_bottom_w_y)
dirichlet_top_w_u = compExSol(dirichlet_top_w_x, dirichlet_top_w_y)
dirichlet_left_u = compExSol(dirichlet_left_x, dirichlet_left_y)
#combine points
dirichlet_bottom_e_bnd = np.concatenate((dirichlet_bottom_e_x, dirichlet_bottom_e_y,
dirichlet_bottom_e_u), axis=1)
dirichlet_right_s_bnd = np.concatenate((dirichlet_right_s_x, dirichlet_right_s_y,
dirichlet_right_s_u), axis=1)
dirichlet_crack_bnd = np.concatenate((dirichlet_crack_x, dirichlet_crack_y,
dirichlet_crack_u), axis=1)
dirichlet_right_n_bnd = np.concatenate((dirichlet_right_n_x, dirichlet_right_n_y,
dirichlet_right_n_u), axis=1)
dirichlet_top_e_bnd = np.concatenate((dirichlet_top_e_x, dirichlet_top_e_y,
dirichlet_top_e_u), axis=1)
dirichlet_bottom_w_bnd = np.concatenate((dirichlet_bottom_w_x, dirichlet_bottom_w_y,
dirichlet_bottom_w_u), axis=1)
dirichlet_top_w_bnd = np.concatenate((dirichlet_top_w_x, dirichlet_top_w_y,
dirichlet_top_w_u), axis=1)
dirichlet_left_bnd = np.concatenate((dirichlet_left_x, dirichlet_left_y,
dirichlet_left_u), axis=1)
dirichlet_bnd = np.concatenate((dirichlet_bottom_e_bnd, dirichlet_right_s_bnd,
dirichlet_crack_bnd, dirichlet_right_n_bnd,
dirichlet_top_e_bnd, dirichlet_bottom_w_bnd,
dirichlet_top_w_bnd, dirichlet_left_bnd), axis=0)
neumann_bnd = np.zeros((1,5))
interior_se_x_flat = np.ndarray.flatten(interior_se_x)[np.newaxis]
interior_se_y_flat = np.ndarray.flatten(interior_se_y)[np.newaxis]
interior_ne_x_flat = np.ndarray.flatten(interior_ne_x)[np.newaxis]
interior_ne_y_flat = np.ndarray.flatten(interior_ne_y)[np.newaxis]
interior_w_x_flat = np.ndarray.flatten(interior_w_x)[np.newaxis]
interior_w_y_flat = np.ndarray.flatten(interior_w_y)[np.newaxis]
interior_x_flat = np.concatenate((interior_se_x_flat, interior_ne_x_flat, interior_w_x_flat), axis=1)
interior_y_flat = np.concatenate((interior_se_y_flat, interior_ne_y_flat, interior_w_y_flat), axis=1)
#generate interior values (f(x,y))
f_val = np.zeros_like(interior_x_flat)
X_int = np.concatenate((interior_x_flat.T, interior_y_flat.T, f_val.T), axis=1)
top_pred_X = np.zeros([0,3])
#adaptivity loop
rel_err = np.zeros(numIter)
rel_est_err = np.zeros(numIter)
numPts = np.zeros(numIter)
print('Defining model...')
model = PoissonEquationColl(dirichlet_bnd, neumann_bnd, alpha, layers, data_type, pen_dir, pen_neu)
for i in range(numIter):
#training part
X_int = np.concatenate((X_int, top_pred_X))
print('Domain geometry')
plt.scatter(neumann_bnd[:,0], neumann_bnd[:,1],s=0.5,c='g')
plt.scatter(dirichlet_bnd[:,0], dirichlet_bnd[:,1],s=0.5,c='r')
plt.scatter(X_int[:,0], X_int[:,1], s=0.5, c='b')
plt.show()
start_time = time.time()
print('Starting training...')
model.train(X_int, num_train_its)
elapsed = time.time() - start_time
print('Training time: %.4f' % (elapsed))
#generate points for evaluating the model
print('Evaluating model...')
numPredPtsX = 2*numIntPtsX
numPredPtsY = 2*numIntPtsY
pred_interior_se_x, pred_interior_se_y = domainSEGeom.getUnifIntPts(numPredPtsX, numPredPtsY, [1,1,1,1])
pred_interior_ne_x, pred_interior_ne_y = domainNEGeom.getUnifIntPts(numPredPtsX, numPredPtsY, [1,1,1,1])
pred_interior_w_x, pred_interior_w_y = domainWGeom.getUnifIntPts(numPredPtsX, 2*numPredPtsY, [1,1,1,1])
pred_interior_se_x_flat = np.ndarray.flatten(pred_interior_se_x)[np.newaxis]
pred_interior_se_y_flat = np.ndarray.flatten(pred_interior_se_y)[np.newaxis]
pred_interior_ne_x_flat = np.ndarray.flatten(pred_interior_ne_x)[np.newaxis]
pred_interior_ne_y_flat = np.ndarray.flatten(pred_interior_ne_y)[np.newaxis]
pred_interior_w_x_flat = np.ndarray.flatten(pred_interior_w_x)[np.newaxis]
pred_interior_w_y_flat = np.ndarray.flatten(pred_interior_w_y)[np.newaxis]
pred_se_X = np.concatenate((pred_interior_se_x_flat.T, pred_interior_se_y_flat.T), axis=1)
pred_ne_X = np.concatenate((pred_interior_ne_x_flat.T, pred_interior_ne_y_flat.T), axis=1)
pred_w_X = np.concatenate((pred_interior_w_x_flat.T, pred_interior_w_y_flat.T), axis=1)
u_pred_se, f_pred_se = model.predict(pred_se_X)
u_pred_ne, f_pred_ne = model.predict(pred_ne_X)
u_pred_w, f_pred_w = model.predict(pred_w_X)
u_pred = np.concatenate((u_pred_se, u_pred_ne, u_pred_w), axis=0)
f_pred = np.concatenate((f_pred_se, f_pred_ne, f_pred_w), axis=0)
#define exact solution
u_exact_se = compExSol(pred_interior_se_x_flat.T, pred_interior_se_y_flat.T)
u_exact_ne = compExSol(pred_interior_ne_x_flat.T, pred_interior_ne_y_flat.T)
u_exact_w = compExSol(pred_interior_w_x_flat.T, pred_interior_w_y_flat.T)
u_exact = np.concatenate((u_exact_se, u_exact_ne, u_exact_w), axis=0)
u_pred_err = u_exact-u_pred
error_u = (np.linalg.norm(u_exact-u_pred,2)/np.linalg.norm(u_exact,2))
print('Relative error u: %e' % (error_u))
def plotSol(pred_interior_se_x, pred_interior_ne_x, pred_interior_w_x,
pred_interior_se_y, pred_interior_ne_y, pred_interior_w_y,
val_se, val_ne, val_w, numPredPtsX, numPredPtsY):
min_val = min(min(val_se), min(val_ne), min(val_w))[0]
max_val = max(max(val_se), max(val_ne), max(val_w))[0]
val_se = np.resize(val_se, [numPredPtsY, numPredPtsX])
    val_ne = np.resize(val_ne, [numPredPtsY, numPredPtsX])
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
import os
from fluids import *
import numpy as np
from math import pi, log10, log
from random import uniform
from numpy.testing import assert_allclose
from scipy.constants import *
from scipy.optimize import *
from scipy.interpolate import *
from fluids import fluids_data_dir
from fluids.core import Engauge_2d_parser
from fluids.optional.pychebfun import *
import pytest
def log_uniform(low, high):
return 10**uniform(log10(low), log10(high))
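# Illustrative sketch (not from the original test suite): log_uniform draws
# samples that are uniform in log10-space, i.e. every decade between low and
# high is equally likely, which is why it is used below for quantities such as
# Re and Di that span many orders of magnitude.
# Example (hypothetical values): log_uniform(1e-3, 1e3) lands in [1e-3, 1e-2]
# with the same probability (1/6) as in [1e2, 1e3].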
def test_fittings():
K = entrance_beveled_orifice(Di=0.1, do=.07, l=0.003, angle=45)
assert_allclose(K, 1.2987552913818574)
### Exits
assert_allclose(exit_normal(), 1.0)
K_helix = helix(Di=0.01, rs=0.1, pitch=.03, N=10, fd=.0185)
assert_allclose(K_helix, 14.525134924495514)
K_spiral = spiral(Di=0.01, rmax=.1, rmin=.02, pitch=.01, fd=0.0185)
assert_allclose(K_spiral, 7.950918552775473)
### Contractions
K_sharp = contraction_sharp(Di1=1, Di2=0.4)
assert_allclose(K_sharp, 0.5301269161591805)
K_beveled = contraction_beveled(Di1=0.5, Di2=0.1, l=.7*.1, angle=120)
assert_allclose(K_beveled, 0.40946469413070485)
### Expansions (diffusers)
K_sharp = diffuser_sharp(Di1=.5, Di2=1)
assert_allclose(K_sharp, 0.5625)
K = diffuser_curved(Di1=.25**0.5, Di2=1., l=2.)
assert_allclose(K, 0.2299781250000002)
K = diffuser_pipe_reducer(Di1=.5, Di2=.75, l=1.5, fd1=0.07)
assert_allclose(K, 0.06873244301714816)
K = diffuser_pipe_reducer(Di1=.5, Di2=.75, l=1.5, fd1=0.07, fd2=.08)
assert_allclose(K, 0.06952256647393829)
# Misc
K1 = Darby3K(NPS=2., Re=10000., name='Valve, Angle valve, 45°, full line size, β = 1')
K2 = Darby3K(NPS=12., Re=10000., name='Valve, Angle valve, 45°, full line size, β = 1')
K3 = Darby3K(NPS=12., Re=10000., K1=950, Ki=0.25, Kd=4)
Ks = [1.1572523963562353, 0.819510280626355, 0.819510280626355]
assert_allclose([K1, K2, K3], Ks)
with pytest.raises(Exception):
Darby3K(NPS=12., Re=10000)
with pytest.raises(Exception):
Darby3K(NPS=12., Re=10000, name='fail')
tot = sum([Darby3K(NPS=2., Re=1000, name=i) for i in Darby.keys()])
assert_allclose(tot, 67.96442287975898)
K1 = Hooper2K(Di=2., Re=10000., name='Valve, Globe, Standard')
K2 = Hooper2K(Di=2., Re=10000., K1=900, Kinfty=4)
assert_allclose([K1, K2], [6.15, 6.09])
tot = sum([Hooper2K(Di=2., Re=10000., name=i) for i in Hooper.keys()])
assert_allclose(tot, 46.18)
with pytest.raises(Exception):
Hooper2K(Di=2, Re=10000)
with pytest.raises(Exception):
Hooper2K(Di=2., Re=10000, name='fail')
K2 = change_K_basis(K1=32.68875692997804, D1=.01, D2=.02)
assert_allclose(K2, 523.0201108796487)
### Entrances
def test_entrance_distance_45_Miller():
from fluids.fittings import entrance_distance_45_Miller
K = entrance_distance_45_Miller(Di=0.1, Di0=0.14)
assert_allclose(K, 0.24407641818143339)
def test_entrance_distance():
K1 = entrance_distance(0.1, t=0.0005)
assert_allclose(K1, 1.0154100000000004)
assert_allclose(entrance_distance(Di=0.1, t=0.05), 0.57)
K = entrance_distance(Di=0.1, t=0.0005, method='Miller')
assert_allclose(K, 1.0280427936730414)
K = entrance_distance(Di=0.1, t=0.0005, method='Idelchik')
assert_allclose(K, 0.9249999999999999)
K = entrance_distance(Di=0.1, t=0.0005, l=.02, method='Idelchik')
assert_allclose(K, 0.8475000000000001)
K = entrance_distance(Di=0.1, t=0.0005, method='Harris')
assert_allclose(K, 0.8705806231290558, 3e-3)
K = entrance_distance(Di=0.1, method='Crane')
assert_allclose(K, 0.78)
with pytest.raises(Exception):
entrance_distance(Di=0.1, t=0.01, method='BADMETHOD')
def test_entrance_rounded():
K = entrance_rounded(Di=0.1, rc=0.0235)
assert_allclose(K, 0.09839534618360923)
assert_allclose(entrance_rounded(Di=0.1, rc=0.2), 0.03)
K = entrance_rounded(Di=0.1, rc=0.0235, method='Miller')
assert_allclose(K, 0.057734448458542094)
K = entrance_rounded(Di=0.1, rc=0.0235, method='Swamee')
assert_allclose(K, 0.06818838227156554)
K = entrance_rounded(Di=0.1, rc=0.01, method='Crane')
assert_allclose(K, .09)
K = entrance_rounded(Di=0.1, rc=0.01, method='Harris')
assert_allclose(K, 0.04864878230217168)
# Limiting condition
K = entrance_rounded(Di=0.1, rc=0.0235, method='Harris')
assert_allclose(K, 0.0)
K = entrance_rounded(Di=0.1, rc=0.01, method='Idelchik')
assert_allclose(K, 0.11328005177738182)
# Limiting condition
K = entrance_rounded(Di=0.1, rc=0.0235, method='Idelchik')
assert_allclose(K, 0.03)
with pytest.raises(Exception):
entrance_rounded(Di=0.1, rc=0.01, method='BADMETHOD')
def test_entrance_beveled():
K = entrance_beveled(Di=0.1, l=0.003, angle=45)
assert_allclose(K, 0.45086864221916984)
K = entrance_beveled(Di=0.1, l=0.003, angle=45, method='Idelchik')
assert_allclose(K, 0.3995000000000001)
def test_entrance_sharp():
assert_allclose(entrance_sharp(), 0.57)
with pytest.raises(Exception):
entrance_sharp(method='BADMETHOD')
for method in ['Swamee', 'Blevins', 'Idelchik', 'Crane']:
assert_allclose(0.5, entrance_sharp(method=method))
entrance_sharp(method='Miller') # Don't bother checking a value for the Miller method
def test_entrance_angled():
K_30_Idelchik = 0.9798076211353316
assert_allclose(entrance_angled(30), K_30_Idelchik)
assert_allclose(entrance_angled(30, method='Idelchik'), K_30_Idelchik)
with pytest.raises(Exception):
entrance_angled(30, method='BADMETHOD')
### Bends
def test_bend_rounded_Crane():
K = bend_rounded_Crane(Di=.4020, rc=.4*5, angle=30)
assert_allclose(K, 0.09321910015613409)
K_max = bend_rounded_Crane(Di=.400, rc=.4*25, angle=30)
K_limit = bend_rounded_Crane(Di=.400, rc=.4*20, angle=30)
assert_allclose(K_max, K_limit)
def test_bend_rounded_Miller():
# Miller examples - 9.12
D = .6
Re = Reynolds(V=4, D=D, nu=1.14E-6)
kwargs = dict(Di=D, bend_diameters=2, angle=90, Re=Re, roughness=.02E-3)
K = bend_rounded_Miller(L_unimpeded=30*D, **kwargs)
assert_allclose(K, 0.1513266131915296, rtol=1e-4)# 0.150 in Miller- 1% difference due to fd
K = bend_rounded_Miller(L_unimpeded=0*D, **kwargs)
assert_allclose(K, 0.1414607344374372, rtol=1e-4) # 0.135 in Miller - Difference mainly from Co interpolation method, OK with that
K = bend_rounded_Miller(L_unimpeded=2*D, **kwargs)
assert_allclose(K, 0.09343184457353562, rtol=1e-4) # 0.093 in miller
def test_bend_rounded():
### Bends
K_5_rc = [bend_rounded(Di=4.020, rc=4.0*5, angle=i, fd=0.0163) for i in [15, 30, 45, 60, 75, 90]]
K_5_rc_values = [0.07038212630028828, 0.10680196344492195, 0.13858204974134541, 0.16977191374717754, 0.20114941557508642, 0.23248382866658507]
assert_allclose(K_5_rc, K_5_rc_values)
K_10_rc = [bend_rounded(Di=34.500, rc=36*10, angle=i, fd=0.0106) for i in [15, 30, 45, 60, 75, 90]]
K_10_rc_values = [0.061075866683922314, 0.10162621862720357, 0.14158887563243763, 0.18225270014527103, 0.22309967045081655, 0.26343782210280947]
assert_allclose(K_10_rc, K_10_rc_values)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, fd=0.0163)
assert_allclose(K, 0.106920213333191)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, Re=1E5)
assert_allclose(K, 0.11532121658742862)
K = bend_rounded(Di=4.020, bend_diameters=5, angle=30, Re=1E5, method='Miller')
assert_allclose(K, 0.10276501180879682)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Crane')
assert_allclose(K, 0.08959057097762159)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Ito')
assert_allclose(K, 0.10457946464978755)
K = bend_rounded(Di=.5, bend_diameters=5, angle=30, Re=1E5, method='Swamee')
assert_allclose(K, 0.055429466248839564)
def test_bend_miter():
K_miters = [bend_miter(i) for i in [150, 120, 90, 75, 60, 45, 30, 15]]
K_miter_values = [2.7128147734758103, 2.0264994448555864, 1.2020815280171306, 0.8332188430731828, 0.5299999999999998, 0.30419633092708653, 0.15308822558050816, 0.06051389308126326]
assert_allclose(K_miters, K_miter_values)
K = bend_miter(Di=.6, angle=45, Re=1e6, roughness=1e-5, L_unimpeded=20, method='Miller')
assert_allclose(K, 0.2944060416245167)
K = bend_miter(Di=.05, angle=45, Re=1e6, roughness=1e-5, method='Crane')
assert_allclose(K, 0.28597953150073047)
K = bend_miter(angle=45, Re=1e6, method='Rennels')
assert_allclose(K, 0.30419633092708653)
with pytest.raises(Exception):
bend_miter(angle=45, Re=1e6, method='BADMETHOD')
def test_bend_miter_Miller():
K = bend_miter_Miller(Di=.6, angle=45, Re=1e6, roughness=1e-5, L_unimpeded=20)
assert_allclose(K, 0.2944060416245167)
K_default_L_unimpeded = bend_miter_Miller(Di=.6, angle=45, Re=1e6, roughness=1e-5)
assert_allclose(K, K_default_L_unimpeded)
K_high_angle = bend_miter_Miller(Di=.6, angle=120, Re=1e6, roughness=1e-5, L_unimpeded=20)
K_higher_angle = bend_miter_Miller(Di=.6, angle=150, Re=1e6, roughness=1e-5, L_unimpeded=20)
assert_allclose(K_high_angle, K_higher_angle)
@pytest.mark.slow
@pytest.mark.fuzz
def test_bend_rounded_Miller_fuzz():
# Tested for quite a while without problems
answers = []
for i in range(500):
Di = log_uniform(1e-5, 100)
rc = uniform(0, 100)
angle = uniform(0, 180)
Re = log_uniform(1e-5, 1E15)
roughness = uniform(1e-10, Di*.95)
L_unimpeded = log_uniform(1e-10, Di*1000)
ans = bend_rounded_Miller(Di=Di, rc=rc, angle=angle, Re=Re, roughness=roughness, L_unimpeded=L_unimpeded)
if np.isnan(ans) or np.isinf(ans):
raise Exception
answers.append(ans)
assert min(answers) >= 0
assert max(answers) < 1E10
@pytest.mark.slow
@pytest.mark.fuzz
def test_bend_miter_Miller_fuzz():
# Tested for quite a while without problems
answers = []
for i in range(10**3):
Di = log_uniform(1e-5, 100)
angle = uniform(0, 120)
Re = log_uniform(1e-5, 1E15)
roughness = uniform(1e-10, Di*.95)
L_unimpeded = log_uniform(1e-10, Di*1000)
ans = bend_miter_Miller(Di=Di, angle=angle, Re=Re, roughness=roughness, L_unimpeded=L_unimpeded)
if np.isnan(ans) or np.isinf(ans):
raise Exception
answers.append(ans)
assert min(answers) >= 0
assert max(answers) < 1E10
### Diffusers
def test_diffuser_conical():
K1 = diffuser_conical(Di1=.1**0.5, Di2=1, angle=10., fd=0.020)
K2 = diffuser_conical(Di1=1/3., Di2=1, angle=50, fd=0.03) # 2
K3 = diffuser_conical(Di1=2/3., Di2=1, angle=40, fd=0.03) # 3
K4 = diffuser_conical(Di1=1/3., Di2=1, angle=120, fd=0.0185) # #4
K5 = diffuser_conical(Di1=2/3., Di2=1, angle=120, fd=0.0185) # Last
K6 = diffuser_conical(Di1=.1**0.5, Di2=1, l=3.908, fd=0.020)
Ks = [0.12301652230915454, 0.8081340270019336, 0.32533470783539786, 0.812308728765127, 0.3282650135070033, 0.12300865396254032]
assert_allclose([K1, K2, K3, K4, K5, K6], Ks)
with pytest.raises(Exception):
diffuser_conical(Di1=.1, Di2=0.1, angle=1800., fd=0.020)
with pytest.raises(Exception):
diffuser_conical(Di1=.1, Di2=0.1, fd=0.020)
K1 = diffuser_conical_staged(Di1=1., Di2=10., DEs=[2,3,4,5,6,7,8,9], ls=[1,1,1,1,1,1,1,1,1], fd=0.01)
K2 = diffuser_conical(Di1=1., Di2=10.,l=9, fd=0.01)
Ks = [1.7681854713484308, 0.973137914861591]
assert_allclose([K1, K2], Ks)
    # Idelchik
Ks_Idelchik = [diffuser_conical(Di1=.1**0.5, Di2=1, l=l, method='Idelchik') for l in [.1, .5, 1, 2, 3, 4, 5, 20]]
Ks_Idelchik_expect = [0.8617385829640242, 0.9283647028367953, 0.7082429168951839, 0.291016580744589, 0.18504484868875992, 0.147705693811332, 0.12911637682462676, 0.17]
assert_allclose(Ks_Idelchik, Ks_Idelchik_expect, rtol=1e-2)
### Contractions
def test_contraction_conical_Crane():
K2 = contraction_conical_Crane(Di1=0.0779, Di2=0.0525, l=0)
assert_allclose(K2, 0.2729017979998056)
def test_contraction_round():
K_round = contraction_round(Di1=1, Di2=0.4, rc=0.04)
assert_allclose(K_round, 0.1783332490866574)
K = contraction_round(Di1=1, Di2=0.4, rc=0.04, method='Miller')
assert_allclose(K, 0.085659530512986387)
K = contraction_round(Di1=1, Di2=0.4, rc=0.04, method='Idelchik')
assert_allclose(K, 0.1008)
with pytest.raises(Exception):
contraction_round(Di1=1, Di2=0.4, rc=0.04, method='BADMETHOD')
def test_contraction_round_Miller():
K = contraction_round_Miller(Di1=1, Di2=0.4, rc=0.04)
assert_allclose(K, 0.085659530512986387)
def test_contraction_conical():
K_conical1 = contraction_conical(Di1=0.1, Di2=0.04, l=0.04, fd=0.0185)
K_conical2 = contraction_conical(Di1=0.1, Di2=0.04, angle=73.74, fd=0.0185)
assert_allclose([K_conical1, K_conical2], [0.15779041548350314, 0.15779101784158286])
with pytest.raises(Exception):
contraction_conical(Di1=0.1, Di2=0.04, fd=0.0185)
K = contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='Rennels')
assert_allclose(K, 0.47462419839494946)
K = contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='Idelchik')
assert_allclose(K, 0.391723)
K = contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='Crane')
assert_allclose(K, 0.41815380146594)
K = contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='Swamee')
assert_allclose(K, 0.4479863925376303)
K = contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='Blevins')
assert_allclose(K, 0.365)
K = contraction_conical(Di1=0.1, Di2=0.04, l=0.04, Re=1E6, method='Miller')
assert_allclose(K, 0.0918289683812792)
# high l ratio rounding
K = contraction_conical(Di1=0.1, Di2=0.06, l=0.04, Re=1E6, method='Miller')
assert_allclose(K, 0.08651515699621345)
# low a ratio rounding
K = contraction_conical(Di1=0.1, Di2=0.099, l=0.04, Re=1E6, method='Miller')
assert_allclose(K, 0.03065262382984957)
# low l ratio
K = contraction_conical(Di1=0.1, Di2=0.04, l=0.001, Re=1E6, method='Miller')
assert_allclose(K, 0.5)
# high l ratio rounding
K = contraction_conical(Di1=0.1, Di2=0.05, l=1, Re=1E6, method='Miller')
assert_allclose(K, 0.04497085709551787)
with pytest.raises(Exception):
contraction_conical(Di1=0.1, Di2=.04, l=.004, Re=1E6, method='BADMETHOD')
### Valves
def test_valve_coefficients():
Cv = Kv_to_Cv(2)
assert_allclose(Cv, 2.3121984567073133)
Kv = Cv_to_Kv(2.312)
assert_allclose(Kv, 1.9998283393826013)
K = Kv_to_K(2.312, .015)
assert_allclose(K, 15.15337460039990)
Kv = K_to_Kv(15.15337460039990, .015)
assert_allclose(Kv, 2.312)
# Two way conversions
K = Cv_to_K(2.712, .015)
assert_allclose(K, 14.719595348352552)
assert_allclose(K, Kv_to_K(Cv_to_Kv(2.712), 0.015))
Cv = K_to_Cv(14.719595348352552, .015)
assert_allclose(Cv, 2.712)
assert_allclose(Cv, Kv_to_Cv(K_to_Kv(14.719595348352552, 0.015)))
# Code to generate the Kv Cv conversion factor
    # Round-trip check; randomly assume Kv = 12, rho = 900; they can be anything
    # and it still works
dP = 1E5
rho = 900.
Kv = 12.
Q = Kv/3600.
D = .01
V = Q/(pi/4*D**2)
K = dP/(.5*rho*V*V)
good_K = K
def to_solve(x):
from scipy.constants import gallon, minute, hour, psi
conversion = gallon/minute*hour # from gpm to m^3/hr
dP = 1*psi
Cv = Kv*x*conversion
Q = Cv/3600
D = .01
V = Q/(pi/4*D**2)
K = dP/(.5*rho*V*V)
return K - good_K
from scipy.optimize import newton
ans = newton(to_solve, 1.2)
assert_allclose(ans, 1.1560992283536566)
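# Illustrative sketch (not part of the original tests; numbers are made up):
# a dimensionless loss coefficient K converts to a pressure drop through
# dP = K*0.5*rho*V**2, the same relation used in the round-trip check above.
# For K = 1.5, rho = 1000 kg/m^3 and V = 2 m/s:
# dP = 1.5*0.5*1000.0*2.0**2 # = 3000 Pa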
def test_K_gate_valve_Crane():
K = K_gate_valve_Crane(D1=.01, D2=.02, angle=45, fd=.015)
assert_allclose(K, 14.548553268047963)
K = K_gate_valve_Crane(D1=.1, D2=.1, angle=0, fd=.015)
assert_allclose(K, 0.12)
# non-smooth transition test
K = K_gate_valve_Crane(D1=.1, D2=.146, angle=45, fd=.015)
assert_allclose(K, 2.5577948931946746)
K = K_gate_valve_Crane(D1=.1, D2=.146, angle=45.01, fd=.015)
assert_allclose(K, 2.5719286772143595)
K = K_gate_valve_Crane(D1=.1, D2=.146, angle=13.115)
assert_allclose(K, 1.1466029421844073, rtol=1e-4)
def test_K_globe_valve_Crane():
K = K_globe_valve_Crane(.01, .02, fd=.015)
assert_allclose(K, 87.1)
assert_allclose(K_globe_valve_Crane(.01, .01, fd=.015), .015*340)
K = K_globe_valve_Crane(.01, .02)
assert_allclose(K, 135.9200548324305)
def test_K_angle_valve_Crane():
K = K_angle_valve_Crane(.01, .02, fd=.016)
assert_allclose(K, 19.58)
K = K_angle_valve_Crane(.01, .02, fd=.016, style=1)
assert_allclose(K, 43.9)
K = K_angle_valve_Crane(.01, .01, fd=.016, style=1)
assert_allclose(K, 2.4)
with pytest.raises(Exception):
K_angle_valve_Crane(.01, .02, fd=.016, style=-1)
K = K_angle_valve_Crane(.01, .02)
assert_allclose(K, 26.597361811128465)
def test_K_swing_check_valve_Crane():
K = K_swing_check_valve_Crane(D=.1, fd=.016)
assert_allclose(K, 1.6)
K = K_swing_check_valve_Crane(D=.1, fd=.016, angled=False)
assert_allclose(K, 0.8)
K = K_swing_check_valve_Crane(D=.02)
assert_allclose(K, 2.3974274785373257)
def test_K_lift_check_valve_Crane():
K = K_lift_check_valve_Crane(.01, .02, fd=.016)
assert_allclose(K, 21.58)
K = K_lift_check_valve_Crane(.01, .01, fd=.016)
assert_allclose(K, 0.88)
K = K_lift_check_valve_Crane(.01, .01, fd=.016, angled=False)
assert_allclose(K, 9.6)
K = K_lift_check_valve_Crane(.01, .02, fd=.016, angled=False)
assert_allclose(K, 161.1)
K = K_lift_check_valve_Crane(.01, .02)
assert_allclose(K, 28.597361811128465)
def test_K_tilting_disk_check_valve_Crane():
K = K_tilting_disk_check_valve_Crane(.01, 5, fd=.016)
assert_allclose(K, 0.64)
K = K_tilting_disk_check_valve_Crane(.25, 5, fd=.016)
assert_allclose(K, .48)
K = K_tilting_disk_check_valve_Crane(.9, 5, fd=.016)
assert_allclose(K, 0.32)
K = K_tilting_disk_check_valve_Crane(.01, 15, fd=.016)
assert_allclose(K, 1.92)
K = K_tilting_disk_check_valve_Crane(.25, 15, fd=.016)
| assert_allclose(K, 1.44) | numpy.testing.assert_allclose |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5 import uic
import gmi_pathdialog
import sys, os
import numpy as np
import gmi_misc
def switch_path(pth):
old_cwd = os.getcwd()
try:
os.chdir(pth)
except:
gmi_misc.error('CAN NOT OPEN WORKING DIRECTORY '+ pth + ', ABORTING...')
return old_cwd
def switch_path_back(pth):
os.chdir(pth)
return
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if sys.platform == 'darwin':
uic.loadUi("gmi_mainwindow.ui", self)
elif sys.platform == 'linux':
uic.loadUi("gmi_mainwindow_linux.ui", self)
else:
uic.loadUi("gmi_mainwindow.ui", self)
#opened path
self.working_directory_opened = False
self.GMI_PATH = ''
#main window
self.label_CurrentFolder = self.findChild(QtWidgets.QLabel, 'label_CurrentFolder')
self.label_CurrentFolder.setText(self.GMI_PATH)
self.pushButton_ChangeFolder = self.findChild(QtWidgets.QPushButton, 'pushButton_ChangeFolder')
self.pushButton_ChangeFolder.clicked.connect(self.OpenWorkingDirectory)
self.pushButton_Exit = self.findChild(QtWidgets.QPushButton, 'pushButton_Exit')
self.pushButton_Exit.clicked.connect(self.close)
#config editor tab
self.textEdit_ConfigEditor = self.findChild(QtWidgets.QTextEdit, 'textEdit_ConfigEditor')
self.textEdit_ConfigEditor.textChanged.connect(self.EnableConfigSave)
self.pushButton_SaveConfig = self.findChild(QtWidgets.QPushButton, 'pushButton_SaveConfig')
self.pushButton_SaveConfig.clicked.connect(self.SaveConfig)
#plots
self.zoomfactor = 0.95
self.graphicsScene_PlotScene = QtWidgets.QGraphicsScene()
self.graphicsView_PlotView = self.findChild(QtWidgets.QGraphicsView, 'graphicsView_PlotView')
self.pushButton_Plot = self.findChild(QtWidgets.QPushButton, 'pushButton_Plot')
self.pushButton_Plot.clicked.connect(self.plot)
self.pushButton_Spectrum = self.findChild(QtWidgets.QPushButton, 'pushButton_Spectrum')
self.pushButton_Spectrum.clicked.connect(self.spec)
self.pushButton_ZoomIn = self.findChild(QtWidgets.QPushButton, 'pushButton_ZoomIn')
self.pushButton_ZoomIn.clicked.connect(self.plot_zoomin)
self.pushButton_ZoomOut = self.findChild(QtWidgets.QPushButton, 'pushButton_ZoomOut')
self.pushButton_ZoomOut.clicked.connect(self.plot_zoomout)
self.comboBox_GridList = self.findChild(QtWidgets.QComboBox, 'comboBox_GridList')
self.comboBox_GridList.currentIndexChanged.connect(self.EnablePlotting)
#stages
self.checksums = False
self.checkBox_InspectChecksums = self.findChild(QtWidgets.QCheckBox, 'checkBox_InspectChecksums')
if self.checksums:
self.checkBox_InspectChecksums.setCheckState(QtCore.Qt.Checked)
else:
self.checkBox_InspectChecksums.setCheckState(QtCore.Qt.Unchecked)
self.checkBox_InspectChecksums.stateChanged.connect(self.InspectChecksums)
self.stages_updated = False
self.tabs = self.findChild(QtWidgets.QTabWidget, 'tabWidget_MainTabs')
self.tabs.currentChanged.connect(self.UpdateStages)
self.pushButton_CreateTesseroidModel = self.findChild(QtWidgets.QPushButton, 'pushButton_CreateTesseroidModel')
self.pushButton_CreateTesseroidModel.clicked.connect(self.CreateTesseroidModel)
self.label_IndicatorCreateTesseroidModel = self.findChild(QtWidgets.QLabel, 'label_IndicatorCreateTesseroidModel')
self.pushButton_CalculateTesseroidsFields = self.findChild(QtWidgets.QPushButton, 'pushButton_CalculateTesseroidsFields')
self.pushButton_CalculateTesseroidsFields.clicked.connect(self.CalculateTesseroidsFields)
self.label_IndicatorCalculateTesseroidsFields = self.findChild(QtWidgets.QLabel, 'label_IndicatorCalculateTesseroidsFields')
self.pushButton_CreateDesignMatrix = self.findChild(QtWidgets.QPushButton, 'pushButton_CreateDesignMatrix')
self.pushButton_CreateDesignMatrix.clicked.connect(self.CreateDesignMatrix)
self.label_IndicatorCreateDesignMatrix = self.findChild(QtWidgets.QLabel, 'label_IndicatorCreateDesignMatrix')
self.pushButton_ExecuteAll = self.findChild(QtWidgets.QPushButton, 'pushButton_ExecuteAll')
self.pushButton_ExecuteAll.clicked.connect(self.ExecuteAll)
#inversion
self.pushButton_Invert = self.findChild(QtWidgets.QPushButton, 'pushButton_Invert')
self.pushButton_Invert.clicked.connect(self.Invert)
self.listWidget_ResultsList = self.findChild(QtWidgets.QListWidget, 'listWidget_ResultsList')
self.listWidget_ResultsList.currentItemChanged.connect(self.EnableResultPlotting)
self.graphicsScene_ResultPlotScene = QtWidgets.QGraphicsScene()
self.graphicsView_ResultPlotView = self.findChild(QtWidgets.QGraphicsView, 'graphicsView_ResultPlotView')
if not self.working_directory_opened:
self.OpenWorkingDirectory()
def EnableResultPlotting(self):
import gmi_config
import gmi_gmt
import os
old_cwd = switch_path(self.GMI_PATH)
config = gmi_config.read_config()
try:
current_opt = str(self.listWidget_ResultsList.currentItem().text())
except:
current_opt = '_none_'
gmi_misc.debug('bug to fix with restarting inversion win new paramters')
self.listWidget_ResultsList.repaint()
print (current_opt)
if(os.path.exists(current_opt)):
gmi_misc.info(str(current_opt) + ' is selected for plotting')
#self.pushButton_Plotresult.setEnabled(True)
self.graphicsView_ResultPlotView.setEnabled(True)
#self.pushButton_Plotresult.repaint()
else:
#self.pushButton_Plotresult.setDisabled(True)
self.graphicsView_ResultPlotView.setDisabled(True)
#self.pushButton_Plotresult.repaint()
switch_path_back(old_cwd)
self.PlotResult()
def PlotResult(self):
import gmi_config
import gmi_gmt
old_cwd = switch_path(self.GMI_PATH)
self.graphicsScene_ResultPlotScene.clear() #new thing
config = gmi_config.read_config()
output_folder = gmi_misc.init_result_folder()
fname = str(self.listWidget_ResultsList.currentItem().text())
indrem = 3
if '.dat' in fname:
current_plot = output_folder + '/' + fname
dat = np.loadtxt(current_plot)
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot(dat[:, 0], dat[:, 1])
ax.set(xlabel='i', ylabel='val',
title='.dat file plot')
ax.grid()
fig.savefig("temp.png")
#plt.show()
plt.clf()
plt.close()
indrem = 3
elif '.spec' in fname:
current_plot = output_folder + '/' + fname
dat = np.loadtxt(current_plot)
import matplotlib.pyplot as plt
plt.plot(dat[1:, 0], np.log10(dat[:, 1])[1:], '-', lw=0.6)
a_yticks = np.array([1, 0.1, 0.01, 0.001, 0.0001])
plt.yticks(np.log10(a_yticks), a_yticks.astype(str))
a_xticks = np.append( | np.array([1]) | numpy.array |
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from sklearn.decomposition import PCA
def OrderedData(FinalClust_rep,X):
OrderedX = {t:[] for t in | np.unique(FinalClust_rep) | numpy.unique |
from __future__ import division
import numpy
import theano.tensor as T
import theano
from theano.tensor.signal import pool
from theano.tensor.nnet import conv2d
import six.moves.cPickle as pickle
import timeit
import scipy.io
import matplotlib.pyplot as plt
from Adam import adam
class LogisticRegression(object):
def __init__(self, input, n_in, n_out):
self.W = theano.shared(
value=numpy.zeros(
(n_in, n_out),
dtype=theano.config.floatX
),
name='W',
borrow=True
)
self.b = theano.shared(
value=numpy.zeros(
(n_out,),
dtype=theano.config.floatX
),
name='b',
borrow=True
)
self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# symbolic description of how to compute prediction as class whose
# probability is maximal
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
# end-snippet-2
    # Negative log likelihood should be replaced by sigmoid for training; needs to be checked again for the correlation length cases.
# For the New Gaussian Data, the cost should be investigated again.
def sigmoid_cost_function(self, y):
# This is only for fvector
return T.mean(T.switch(T.eq(y, 1), -T.log(self.p_y_given_x), -T.log(1-self.p_y_given_x)))
def errors(self, y):
if y.ndim != self.y_pred.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type)
)
if y.dtype.startswith('int'):
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
class LogisticRegression_2(object):
def __init__(self, input, n_in, n_out, rng):
# start-snippet-1
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = theano.shared(
value=numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)), dtype=theano.config.floatX),
name='W',
borrow=True
)
# self.W = theano.shared(
# value=numpy.zeros(
# (n_in, n_out),
# dtype=theano.config.floatX
# ),
# name='W',
# borrow=True
# )
# initialize the biases b as a vector of n_out 0s
self.b = theano.shared(
value=numpy.zeros(
(n_out,),
dtype=theano.config.floatX
),
name='b',
borrow=True
)
# symbolic expression for computing the matrix of class-membership
# probabilities
# Where:
# W is a matrix where column-k represent the separation hyperplane for
# class-k
# x is a matrix where row-j represents input training sample-j
# b is a vector where element-k represent the free parameter of
# hyperplane-k
self.output = T.nnet.sigmoid(T.dot(input, self.W) + self.b)
# self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)
# self.y_pred = T.round(self.output)
# T.dot(input, self.W) + self.b
self.prep_y = T.argmax(self.output, axis=1)
# end-snippet-1
# parameters of the model
self.params = [self.W, self.b]
# keep track of model input
self.input = input
def negative_log_likelihood(self, y):
# This is not really good as the relu may resulting output 0, and returning nan
return -T.mean(y*T.log(self.output) + (1-y)*T.log(1-self.output))
def sigmoid_cost_function(self, y):
return T.mean(T.switch(T.eq(y, 1), -T.log(self.output), -T.log(1-self.output)))
def mse_cost_function(self, y):
return T.mean(T.square(y - self.output))
def errors(self, y):
if y.ndim != self.output.ndim:
raise TypeError(
'y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.output.type)
)
# check if y is of the correct datatype
if y.dtype.startswith('float'):
return T.mean(T.square(y - self.output))
#T.mean(T.neq(self.y_pred, y))
#T.mean(T.switch(T.eq(y, 1), -T.log(self.output), -T.log(1-self.output)))
#T.mean(T.square(y - self.output))
#1 - T.mean(T.all(T.isclose(y, self.output, rtol=0, atol=0.02), axis=1))
#T.mean(T.sqr(y - self.output))
#1 - T.mean(T.all(T.isclose(y, self.output, rtol=0, atol=0.5), axis=1))
#1 - T.mean(T.all(T.isclose(y, self.output, rtol=0, atol=0.2), axis=1))
# T.abs_(T.mean(T.invert(T.all(T.isclose(self.output, y, rtol=0.005, atol=0.3), axis=1))))
else:
raise NotImplementedError()
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.nnet.relu):
self.input = input
# end-snippet-1
        # `W` is initialized with `W_values` which is uniformly sampled
        # from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
        # for tanh activation function
        # the output of uniform is converted using asarray to dtype
        # theano.config.floatX so that the code is runnable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6 / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
class LeNetConvPoolLayer(object):
"""Pool Layer of a convolutional network """
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dtensor4
:param input: symbolic image tensor, of shape image_shape
:type filter_shape: tuple or list of length 4
:param filter_shape: (number of filters, num input feature maps,
filter height, filter width)
:type image_shape: tuple or list of length 4
:param image_shape: (batch size, num input feature maps,
image height, image width)
:type poolsize: tuple or list of length 2
:param poolsize: the downsampling (pooling) factor (#rows, #cols)
"""
assert image_shape[1] == filter_shape[1]
self.input = input
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = numpy.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) //
numpy.prod(poolsize))
# initialize weights with random weights
W_bound = | numpy.sqrt(6. / (fan_in + fan_out)) | numpy.sqrt |
import numpy as np
from smolyak.applications.polynomials.orthogonal_polynomials import evaluate_orthonormal_polynomials
import random
import math
def arcsine_samples(probability_space,N):
def univariate_arcsine_samples(N,interval):
X_temp = (np.cos(np.pi * np.random.rand(int(N), 1)) + 1) / 2
X = interval[0] + X_temp * (interval[1] - interval[0])
D = 1./(np.pi * np.sqrt((X - interval[0]) * (interval[1] - X)))
return (X,D)
X = np.zeros((N, probability_space.get_c_var()))
D = np.ones((N, 1))
for dim in range(X.shape[1]):
(X_temp, D_temp) = univariate_arcsine_samples(N,probability_space.ups[dim].interval)
X[:, [dim]] = X_temp
D *= D_temp
return (X,D)
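# Illustrative sanity check (not part of the original module): the univariate
# arcsine density used above, 1/(pi*sqrt((x-a)*(b-x))) on [a, b], integrates to
# one; e.g. on [0, 1]:
# from scipy.integrate import quad
# quad(lambda x: 1.0/(np.pi*np.sqrt(x*(1.0-x))), 0.0, 1.0)[0] # ~= 1.0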
def importance_samples(probability_space,N,importance):
if importance == 'arcsine':
(X,D)=arcsine_samples(probability_space, N)
W=probability_space.lebesgue_density(X)/D
return (X, W)
else:
raise ValueError('Sampling measure not implemented yet')
def optimal_samples(tensor_polynomial_subspace, N):
X = np.zeros((N, tensor_polynomial_subspace.get_c_var()))
for i in range(N):
j = random.randrange(0, tensor_polynomial_subspace.get_dimension())
for dim in range(tensor_polynomial_subspace.get_c_var()):
degree = tensor_polynomial_subspace.basis[j][dim]
X[i, dim] = sample_from_polynomial(tensor_polynomial_subspace.probability_distribution.ups[dim],degree)
W = tensor_polynomial_subspace.optimal_weights(X)
return (X, W)
def samples_per_polynomial(tensor_polynomial_subspace,old_basis,pols,c_samples):
l_old = len(old_basis)
N_new = math.ceil(c_samples(len(pols)))
N_add = math.ceil(N_new) - math.ceil(c_samples(l_old))
news = np.zeros(len(pols))
for j,pol in enumerate(pols):
if not pol in old_basis:
news[j] = True
N = int(N_new * np.sum(news)+N_add*(len(pols)- | np.sum(news) | numpy.sum |
"""Module providing handling of the LAT point spread function.
$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/irfs/psf.py,v 1.4 2016/06/30 00:02:42 wallacee Exp $
Author: <NAME>
"""
__version__='$Revision: 1.4 $'
import os
import numpy as np
from astropy.io import fits
from scipy import integrate
from uw.utilities import keyword_options
from . import caldb, IrfError
class PSF(object):
"""Object representing the LAT PSF."""
def __init__(self,filename,exposure=None,rpsf_extension='RPSF',psf_scaling_extension='PSF_SCALING'):
self._load_data(filename,rpsf_extension,psf_scaling_extension)
self.set_weights(exposure)
def _load_data(self,filename,rpsf,scaling):
rpsf = fits.getdata(filename,rpsf)[0]
psf_scaling = fits.getdata(filename,scaling)[0]
self.ebins = np.vstack([rpsf.field('ENERG_LO'),rpsf.field('ENERG_HI')]).T
self.cthetabins = np.vstack([rpsf.field('CTHETA_LO'),rpsf.field('CTHETA_HI')]).T
self.scale_parameters = psf_scaling.field('PSFSCALE')
def _normalize():
#Normalize parameters. Logic copied from like.pypsf.
#I'm still confused about some of the conventions -EEW
sf = self.scale_function(self.ebins.prod(axis=1)**.5)
normc = self.psf_base_integral(np.pi/2,self.score*sf,self.gcore)
normt = self.psf_base_integral(np.pi/2,self.score*sf,self.gtail)
# NB leave scale factor out here so we can adjust norm to
# a particular energy (effectively cancelling in integral)
norm = (2*np.pi*(normc*self.score**2+
normt*self.ntail*self.stail**2))**-1
self.ncore = norm # adjust NCORE
self.ntail *= norm # set to NTAIL*NCORE
if 'SCORE' in rpsf.array.dtype.names:
self.ncore = rpsf.field('NCORE')
self.score = rpsf.field('SCORE')
self.gcore = rpsf.field('GCORE')
self.ntail = rpsf.field('NTAIL')
self.stail = rpsf.field('STAIL')
self.gtail = rpsf.field('GTAIL')
_normalize()
else:
#Old style (P6)
#TODO: Check that this ncore gives the correct normalization
self.score = self.stail = rpsf.field('SIGMA')
self.gcore = rpsf.field('GCORE')
self.gtail = rpsf.field('GTAIL')
self.ncore = np.ones_like(rpsf.field('SIGMA'))
self.ntail = np.zeros_like(rpsf.field('SIGMA'))
def __getitem__(self,mask):
"""Return PSF parameters for a given energy and cos(theta) selection"""
return np.concatenate([getattr(self,p)[mask][None]
for p in ('ncore','ntail','score','stail','gcore','gtail')])
def set_weights(self,exposure=None):
"""Set weights to use for exposure-weighted averages."""
self.weights = None
#if exposure is None:
# self.weights = None
#else:
# self.weights =
def scale_function(self,e):
"""Compute the PSF scale factor for energy `e`.
Parameter
---------
e
Energy to evaluate the scale factor at, in MeV. May be a scalar
or a numpy array.
Returns
-------
sp
The scale factor at the requested energy, defined as
:math:`\sqrt{\left(c_0(\frac{E}{100 MeV})^{-\beta}\right)^2+c_1^2}`.
The return type is the same as that of `e`.
"""
c0,c1,beta = self.scale_parameters
return np.sqrt( (c0*(np.asarray(e)/100.)**-beta)**2 + c1**2)
def __call__(self,delta,e):
"""Compute the PSF density at angular deviation delta and energy e.
If exposure weights have been set by the `set_weights` method, the
calculated density will be a weighted average over inclination angle.
Otherwise, only the on-axis value will be calculated.
Parameters
----------
delta : array_like
Angular deviation(s) at which to evaluate the PSF, in radians.
e : array_like
Energy (or energies) at which to evaluate the PSF, in MeV.
ctheta : array_like, optional
Cosine of the inclination angle, theta, for which the PSF is
to be evaluated. The default is None, indicating that the
computed density should be averaged over the inclination angle.
If exposure information has been provided through the
`set_exposure` method, the average over theta will be weighted
by the exposure.
Returns
-------
density: float or array
The PSF density at angular distance `delta` and energy `e`. If
either parameter is an array, the return value is an array of
shape (n_delta, n_e).
"""
scale = self.scale_function(e)
mask = np.fmin(np.searchsorted(self.ebins[:,1],e),self.ebins.shape[0]-1)
if self.weights is None:
nc,nt,sc,st,gc,gt = self[-1,mask]
kc,kt = [self.psf_base(delta,s*scale,g)
for s,g in zip((sc,st),(gc,gt))]
return (nc*kc+nt*kt)/scale**2
else:
nc,nt,sc,st,gc,gt = self[:,mask]
kc,kt = [self.psf_base(delta,s*scale,g)
for s,g in zip((sc,st),(gc,gt))]
return (self.weights[:,mask]*(nc*kc+nt*kt)/scale**2).sum(axis=-2)
def psf_base(self,delta,sigma,gamma):
"""Evaluate the King function at angular deviation delta.
Parameters
----------
delta : array_like
The angular deviation in radians at which the function is to be
evaluated. May be a scalar or a numpy array.
sigma, gamma : array_like
The parameters of the King function. May be scalars or arrays of
the same size.
Returns
-------
psf_base : float or array
If `delta`, `sigma`, and `gamma` are all scalars, a scalar is
returned. Otherwise, the return value is an array of dimension
len(`delta`) by len(`sigma`).
"""
return_scalar = np.all([np.isscalar(x) for x in (delta,sigma,gamma)])
d,s,g = (np.asarray(x) for x in (delta,sigma,gamma))
if s.shape!=g.shape:
raise ValueError('Arrays for sigma and gamma must have the same shape')
u = (.5* | np.outer(d,1/s) | numpy.outer |
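# Hedged note (reconstructed for illustration; the original body is truncated
# above): psf_base evaluates the King profile commonly used for the LAT PSF,
# K(delta; sigma, gamma) = (1 - 1/gamma) * (1 + u/gamma)**(-gamma) with
# u = 0.5*(delta/sigma)**2, so a minimal standalone version could read:
# def king_profile(delta, sigma, gamma):
#     u = 0.5 * (delta / sigma) ** 2
#     return (1.0 - 1.0 / gamma) * (1.0 + u / gamma) ** (-gamma)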
import os,warnings
warnings.filterwarnings("ignore")
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import tensorflow.contrib.slim as slim
import scipy.io as sio
from sklearn.utils import shuffle
from util import gpusession,nzr,print_n_txt,remove_file_if_exists
import matplotlib.pyplot as plt
class mlp_reg_class(object):
def __init__(self,_name='mlp_reg',_x_dim=1,_y_dim=1,_h_dims=[64, 64],_actv=tf.nn.tanh,_bn=slim.batch_norm,
_l2_reg_coef=1e-5,_GPU_ID=0,_L1_LOSS=False,_ROBUST_LOSS=False,_LEAKY_ROBUST_LOSS=False,_VERBOSE=True):
self.name = _name
self.x_dim = _x_dim
self.y_dim = _y_dim
self.h_dims = _h_dims
self.actv = _actv
self.bn = _bn
self.l2_reg_coef = _l2_reg_coef
self.GPU_ID = _GPU_ID
self.L1_LOSS = _L1_LOSS
self.ROBUST_LOSS = _ROBUST_LOSS
self.LEAKY_ROBUST_LOSS = _LEAKY_ROBUST_LOSS
self.VERBOSE = _VERBOSE
if _GPU_ID < 0: # with CPU only (no GPU)
# Build model
self.build_model()
# Build graph
self.build_graph()
# Check params
self.check_params()
else: # with GPU
with tf.device('/device:GPU:%d' % (self.GPU_ID)):
# Build model
self.build_model()
# Build graph
self.build_graph()
# Check params
self.check_params()
def build_model(self):
self.x = tf.placeholder(dtype=tf.float32,shape=[None,self.x_dim]) # Input [N x xdim]
self.y = tf.placeholder(dtype=tf.float32,shape=[None,self.y_dim]) # Output [N x ydim]
self.kp = tf.placeholder(dtype=tf.float32,shape=[]) # Keep probability
self.lr = tf.placeholder(dtype=tf.float32,shape=[]) # Learning rate
self.is_training = tf.placeholder(dtype=tf.bool,shape=[]) # Training flag
self.fully_init = tf.random_normal_initializer(stddev=0.01)
self.bias_init = tf.constant_initializer(0.)
self.bn_init = {'beta': tf.constant_initializer(0.),
'gamma': tf.random_normal_initializer(1., 0.01)}
self.bn_params = {'is_training':self.is_training,'decay':0.9,'epsilon':1e-5,
'param_initializers':self.bn_init,'updates_collections':None}
# Build graph
with tf.variable_scope(self.name,reuse=False) as scope:
with slim.arg_scope([slim.fully_connected]
,activation_fn=self.actv
,weights_initializer=self.fully_init
,biases_initializer=self.bias_init
,normalizer_fn=self.bn,normalizer_params=self.bn_params
,weights_regularizer=None):
_net = self.x # Input [N x xdim]
for h_idx in range(len(self.h_dims)): # Loop over hidden layers
_h_dim = self.h_dims[h_idx]
_net = slim.fully_connected(_net,_h_dim,scope='lin'+str(h_idx))
_net = slim.dropout(_net,keep_prob=self.kp,is_training=self.is_training
,scope='dr'+str(h_idx))
self.feat = _net # Feature [N x Q]
self.out = slim.fully_connected(self.feat,self.y_dim,activation_fn=None
,scope='out') # [N x D]
def build_graph(self):
# fitting loss
if self.L1_LOSS: # L1 loss
self._loss_fit = tf.reduce_sum(tf.abs(self.out-self.y),axis=1) # [N x 1]
self.loss_fit = tf.reduce_mean(self._loss_fit) # [1]
elif self.ROBUST_LOSS: # Tukey biweight loss
USE_MAD = False
self.residuals = self.out-self.y # [N x 1]
if USE_MAD:
median = tf.contrib.distributions.percentile(self.residuals, 50.0)
temp = tf.abs(self.residuals-median)
mad = tf.contrib.distributions.percentile(temp, 50.0)
b = 1.4826 # 1.4826
self.r_mad = self.residuals/b/mad
else:
self.r_mad = self.residuals
c = 1 # 4.6851
self.condition = tf.less(tf.abs(self.r_mad),c)
self._loss_fit = tf.where(self.condition,
c*c/6*(1-tf.pow((1-tf.pow(self.r_mad/c,2)),3)),
c*c/6*tf.ones_like(self.r_mad)) # [N x 1]
self.loss_fit = tf.reduce_mean(self._loss_fit) # [1]
elif self.LEAKY_ROBUST_LOSS: # Leakey Tukey biweight loss
USE_MAD = False
self.residuals = self.out-self.y # [N x 1]
if USE_MAD:
median = tf.contrib.distributions.percentile(self.residuals, 50.0)
temp = tf.abs(self.residuals-median)
mad = tf.contrib.distributions.percentile(temp, 50.0)
b = 1.4826 # 1.4826
self.r_mad = self.residuals/b/mad
else:
self.r_mad = self.residuals
c = 1 # 4.6851
self.condition = tf.less(tf.abs(self.r_mad),c)
leaky_rate = 0.1 # 0.1
self._loss_fit = tf.where(self.condition,
c*c/6*(1-tf.pow((1-tf.pow(self.r_mad/c,2)),3)),
leaky_rate*(tf.abs(self.r_mad)-c) + c*c/6) # [N x 1]
self.loss_fit = tf.reduce_mean(self._loss_fit) # [1]
else: # ordinary L2 loss
self._loss_fit = tf.reduce_sum(tf.pow(self.out-self.y,2),axis=1) # [N x 1]
self.loss_fit = tf.reduce_mean(self._loss_fit) # [1]
# Weight decay
_t_vars = tf.trainable_variables()
self.c_vars = [var for var in _t_vars if '%s/'%(self.name) in var.name]
self.l2_reg = self.l2_reg_coef*tf.reduce_sum(tf.stack([tf.nn.l2_loss(v) for v in self.c_vars])) # [1]
self.loss_total = self.loss_fit + self.l2_reg # [1]
# Optimizer
USE_ADAM = False
if USE_ADAM:
self.optm = tf.train.AdamOptimizer(learning_rate=self.lr,beta1=0.9,beta2=0.999
,epsilon=1e-8).minimize(self.loss_total)
else:
self.optm = tf.train.MomentumOptimizer(learning_rate=self.lr
,momentum=0.5).minimize(self.loss_total)
def check_params(self):
_g_vars = tf.global_variables()
self.g_vars = [var for var in _g_vars if '%s/'%(self.name) in var.name]
if self.VERBOSE:
print ("==== Global Variables ====")
for i in range(len(self.g_vars)):
w_name = self.g_vars[i].name
w_shape = self.g_vars[i].get_shape().as_list()
if self.VERBOSE:
print (" [%02d] Name:[%s] Shape:[%s]" % (i,w_name,w_shape))
def sampler(self,_sess,_x):
outVal = _sess.run(self.out,feed_dict={self.x:_x,self.kp:1.0,self.is_training:False})
return outVal
def save2npz(self,_sess,_save_name=None):
""" Save name """
if _save_name==None:
_save_name='net/net_%s.npz'%(self.name)
""" Get global variables """
self.g_wnames,self.g_wvals,self.g_wshapes = [],[],[]
for i in range(len(self.g_vars)):
curr_wname = self.g_vars[i].name
curr_wvar = [v for v in tf.global_variables() if v.name==curr_wname][0]
curr_wval = _sess.run(curr_wvar)
curr_wval_sqz = curr_wval.squeeze()
self.g_wnames.append(curr_wname)
self.g_wvals.append(curr_wval_sqz)
self.g_wshapes.append(curr_wval.shape)
""" Save """
np.savez(_save_name,g_wnames=self.g_wnames,g_wvals=self.g_wvals,g_wshapes=self.g_wshapes)
if self.VERBOSE:
print ("[%s] saved. Size is [%.4f]MB" %
(_save_name,os.path.getsize(_save_name)/1000./1000.))
def restore_from_npz(self,_sess,_loadname=None):
if _loadname==None:
_loadname='net/net_%s_final.npz'%(self.name)
l = np.load(_loadname)
g_wnames = l['g_wnames']
g_wvals = l['g_wvals']
g_wshapes = l['g_wshapes']
for widx,wname in enumerate(g_wnames):
curr_wvar = [v for v in tf.global_variables() if v.name==wname][0]
_sess.run(tf.assign(curr_wvar,g_wvals[widx].reshape(g_wshapes[widx])))
if self.VERBOSE:
print ("Weight restored from [%s] Size is [%.4f]MB" %
(_loadname,os.path.getsize(_loadname)/1000./1000.))
def save2mat_from_npz(self,_x_train='',_y_train='',_save_name=None,_npz_path=None):
# Save weights to mat file so that MATLAB can use it.
if _npz_path == None:
_npz_path = 'net/net_%s.npz'%(self.name)
l = | np.load(_npz_path) | numpy.load |
import os
import math
import numpy as np
import itertools as it
import pylab as plt
import seaborn as sns
import pandas as pd
import multiprocessing as mp
import tables
#from scipy.spatial.distance import euclidean
from numba import njit
from blechpy.utils.particles import HMMInfoParticle
from blechpy import load_dataset
from blechpy.dio import h5io
from blechpy.plotting import hmm_plot as hmmplt
from joblib import Parallel, delayed, Memory
from appdirs import user_cache_dir
cachedir = user_cache_dir('blechpy')
memory = Memory(cachedir, verbose=0)
TEST_PARAMS = {'n_cells': 10, 'n_states': 4, 'state_seq_length': 5,
'trial_time': 3.5, 'dt': 0.001, 'max_rate': 50, 'n_trials': 15,
'min_state_dur': 0.05, 'noise': 0.01, 'baseline_dur': 1}
FACTORIAL_LOOKUP = np.array([math.factorial(x) for x in range(20)])
@njit
def fast_factorial(x):
if x < len(FACTORIAL_LOOKUP):
return FACTORIAL_LOOKUP[x]
else:
y = 1
for i in range(1,x+1):
y = y*i
return y
@njit
def poisson(rate, n, dt):
    '''Gives the probability of each neuron's spike count assuming Poisson spiking
'''
tmp = np.power(rate*dt, n) / np.array([fast_factorial(x) for x in n])
tmp = tmp * np.exp(-rate*dt)
return tmp
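# Illustrative check (not part of the original module; numbers chosen here):
# for a neuron firing at 20 Hz observed in a 1 ms bin, rate*dt = 0.02, so
# poisson(np.array([20.0]), np.array([0]), 0.001) # ~= exp(-0.02) ~= 0.980
# poisson(np.array([20.0]), np.array([1]), 0.001) # ~= 0.02*exp(-0.02) ~= 0.0196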
@njit
def forward(spikes, dt, PI, A, B):
'''Run forward algorithm to compute alpha = P(Xt = i| o1...ot, pi)
Gives the probabilities of being in a specific state at each time point
given the past observations and initial probabilities
Parameters
----------
spikes : np.array
        N x T matrix of spike counts with each entry ((i,j)) holding the # of
        spikes from neuron i in timebin j
    nStates : int, # of hidden states predicted to have generated the spikes
dt : float, timebin in seconds (i.e. 0.001)
PI : np.array
nStates x 1 vector of initial state probabilities
A : np.array
nStates x nStates state transmission matrix with each entry ((i,j))
giving the probability of transitioning from state i to state j
B : np.array
        N x nStates rate matrix. Each entry ((i,j)) gives the predicted rate
        of neuron i in state j
Returns
-------
alpha : np.array
        nStates x T matrix of forward probabilities. Each entry (i,j) gives
P(Xt = i | o1,...,oj, pi)
norms : np.array
1 x T vector of norm used to normalize alpha to be a probability
distribution and also to scale the outputs of the backward algorithm.
norms(t) = sum(alpha(:,t))
'''
nTimeSteps = spikes.shape[1]
nStates = A.shape[0]
# For each state, use the the initial state distribution and spike counts
# to initialize alpha(:,1)
row = np.array([PI[i] * np.prod(poisson(B[:,i], spikes[:,0], dt))
for i in range(nStates)])
alpha = np.zeros((nStates, nTimeSteps))
norms = [np.sum(row)]
alpha[:, 0] = row/norms[0]
for t in range(1, nTimeSteps):
tmp = np.array([np.prod(poisson(B[:, s], spikes[:, t], dt)) *
np.sum(alpha[:, t-1] * A[:,s])
for s in range(nStates)])
tmp_norm = np.sum(tmp)
norms.append(tmp_norm)
tmp = tmp / tmp_norm
alpha[:, t] = tmp
return alpha, norms
@njit
def backward(spikes, dt, A, B, norms):
''' Runs the backward algorithm to compute beta = P(ot+1...oT | Xt=s)
Computes the probability of observing all future observations given the
current state at each time point
    Parameters
    ----------
    spikes : np.array, N x T matrix of spike counts
nStates : int, # of hidden states predicted
dt : float, timebin size in seconds
A : np.array, nStates x nStates matrix of transition probabilities
B : np.array, N x nStates matrix of estimated spike rates for each neuron
Returns
-------
beta : np.array, nStates x T matrix of backward probabilities
'''
nTimeSteps = spikes.shape[1]
nStates = A.shape[0]
beta = np.zeros((nStates, nTimeSteps))
beta[:, -1] = 1 # Initialize final beta to 1 for all states
tStep = list(range(nTimeSteps-1))
tStep.reverse()
for t in tStep:
for s in range(nStates):
beta[s,t] = np.sum((beta[:, t+1] * A[s,:]) *
np.prod(poisson(B[:, s], spikes[:, t+1], dt)))
beta[:, t] = beta[:, t] / norms[t+1]
return beta
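# Illustrative usage sketch (not from the original file; variable names are
# assumptions): with spike counts and model parameters in hand, the scaled
# forward/backward passes combine into posterior state probabilities, and the
# scaling factors give the data log-likelihood:
# alpha, norms = forward(spikes, dt, PI, A, B)
# beta = backward(spikes, dt, A, B, norms)
# log_likelihood = np.sum(np.log(norms))
# posteriors = alpha * beta / np.sum(alpha * beta, axis=0)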
@njit
def baum_welch(spikes, dt, A, B, alpha, beta):
nTimeSteps = spikes.shape[1]
nStates = A.shape[0]
gamma = np.zeros((nStates, nTimeSteps))
epsilons = np.zeros((nStates, nStates, nTimeSteps-1))
for t in range(nTimeSteps):
if t < nTimeSteps-1:
gamma[:, t] = (alpha[:, t] * beta[:, t]) / np.sum(alpha[:,t] * beta[:,t])
epsilonNumerator = np.zeros((nStates, nStates))
for si in range(nStates):
for sj in range(nStates):
probs = np.prod(poisson(B[:,sj], spikes[:, t+1], dt))
epsilonNumerator[si, sj] = (alpha[si, t]*A[si, sj]*
beta[sj, t]*probs)
epsilons[:, :, t] = epsilonNumerator / np.sum(epsilonNumerator)
return gamma, epsilons
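# Hedged note (re-estimation step, not shown in this excerpt): gamma[i, t] is
# the posterior probability of being in state i at time t and epsilons[i, j, t]
# the posterior probability of an i -> j transition between t and t+1; a
# standard Poisson-HMM update built from them would be
# PI_new = gamma[:, 0]
# A_new = epsilons.sum(axis=2) / gamma[:, :-1].sum(axis=1)[:, None]
# B_new = (spikes @ gamma.T) / (gamma.sum(axis=1) * dt)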
def isNotConverged(oldPI, oldA, oldB, PI, A, B, thresh=1e-4):
dPI = np.sqrt(np.sum(np.power(oldPI - PI, 2)))
dA = np.sqrt(np.sum(np.power(oldA - A, 2)))
dB = np.sqrt(np.sum(np.power(oldB - B, 2)))
print('dPI = %f, dA = %f, dB = %f' % (dPI, dA, dB))
if all([x < thresh for x in [dPI, dA, dB]]):
return False
else:
return True
def poisson_viterbi(spikes, dt, PI, A, B):
'''
Parameters
----------
spikes : np.array, Neuron X Time matrix of spike counts
PI : np.array, nStates x 1 vector of initial state probabilities
A : np.array, nStates X nStates matric of state transition probabilities
B : np.array, Neuron X States matrix of estimated firing rates
dt : float, time step size in seconds
Returns
-------
bestPath : np.array
1 x Time vector of states representing the most likely hidden state
sequence
maxPathLogProb : float
Log probability of the most likely state sequence
T1 : np.array
State X Time matrix where each entry (i,j) gives the log probability of
the the most likely path so far ending in state i that generates
observations o1,..., oj
T2: np.array
State X Time matrix of back pointers where each entry (i,j) gives the
state x(j-1) on the most likely path so far ending in state i
'''
if A.shape[0] != A.shape[1]:
raise ValueError('Transition matrix is not square')
nStates = A.shape[0]
nCells, nTimeSteps = spikes.shape
T1 = | np.zeros((nStates, nTimeSteps)) | numpy.zeros |
"""
PL Modeling Program
Python file 1/3: InteractivePLFittingGUI.py (implements the GUI)
-> Python file 2/3: PLModeling.py (implements the PL emission models)
Python file 3/3: InterferenceFunction.py (implements the interference function models)
Author: <NAME>
Date: March 2021
"""
import scipy.special
import scipy.constants as const
from scipy.constants import pi, c, h
from scipy import integrate
import numpy as np
k_eV = const.value('Boltzmann constant in eV/K')
h_eV = const.value('Planck constant in eV/Hz')
try:
import pickle5 as pickle
except:
import pickle
# Set proper path for files when executable is run
from os import path
import sys
path_to_file = getattr(sys, '_MEIPASS', path.abspath(path.dirname(__file__)))
class PLModel:
"""
Models the PL spectra according to three possible models:
BGF (band gap fluctuations)
EF (electrostatic fluctuations)
UPF (unified potential fluctuations: band gap + electrostatic fluctuations)
"""
def __init__(self, params, E=(np.linspace(0.4, 1.5, 1111)), model='BGF'):
"""
Initialise the class.
params =
BGF: mean_E_g, beta, sigma_g, T
EF: E_g, theta, gamma, dmu, T, a0d
UPF: mean_E_g, beta, sigma_g, theta, gamma, T, a0d
"""
# Define class variables
# params:
self.params = params
self.E = E
# Fix quasi-fermi level splitting for BGF model
# (does not affect normalised PL spectra)
self.BGF_mu0 = 0.5
file = open(path.join(path_to_file, 'files', 'conv2Dlookup.pkl'), 'rb')
self.conv_theta = pickle.load(file)
file.close()
# Initialise class outputs
self.abs, self.emission = np.zeros(E.shape), np.zeros(E.shape)
self.solveFunction = self.solveBGF
self.fitFunction = self.emissionBGFFit
self.updateModel(model)
# Solve model
self.solve()
def updateModel(self, model):
"""Update the PL model and corresponding functions."""
if model == 'BGF':
self.solveFunction = self.solveBGF
self.fitFunction = self.emissionBGFFit
elif model == 'EF':
self.solveFunction = self.solveEF
self.fitFunction = self.emissionEFFit
elif model == 'UPF':
self.solveFunction = self.solveUPF
self.fitFunction = self.emissionUPFFit
def solve(self):
"""Solve for absorption and emission."""
self.solveFunction()
def absBGF(self):
"""Compute the absorptance of the BGF model."""
return 0.5*scipy.special.erfc((self.params[0]-self.E)
/ (2**0.5*self.params[2]))
def emissionBGF(self, E, mean_E_g, beta, sigma_g, T):
"""Compute the emission of the BGF model."""
return (pi/(h**3*c**2)
* scipy.special.erfc((mean_E_g-E+beta*sigma_g**2/(k_eV*T))
/ (2**0.5*sigma_g))*(E*const.e)**2
* np.exp(-(E-self.BGF_mu0-beta*mean_E_g)/(k_eV*T)
+ 0.5*(beta*sigma_g/(k_eV*T))**2))
def emissionBGFFit(self, E, mean_E_g, beta, sigma_g, T):
"""Fit the BGF model."""
PL = self.emissionBGF(E, mean_E_g, beta, sigma_g, T)
return PL/ | np.max(PL) | numpy.max |
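# Illustrative usage sketch (not part of the original file; parameter values
# are invented and the lookup table files/conv2Dlookup.pkl must be present):
# E = np.linspace(0.9, 1.4, 501) # photon energies in eV
# model = PLModel(params=[1.15, 1.0, 0.03, 300.0], E=E, model='BGF')
# # params for BGF are (mean_E_g, beta, sigma_g, T); __init__ already calls
# # solve(), and the fit function returns a peak-normalised spectrum:
# PL_norm = model.fitFunction(E, 1.15, 1.0, 0.03, 300.0)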
'''
Reference code to showcase MXNet model prediction on AWS Lambda
@author: <NAME> (<EMAIL>)
version: 0.2
'''
import os
import boto3
import json
import tempfile
import urllib2
import mxnet as mx
import numpy as np
from PIL import Image
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
f_params = 'resnet-18-0000.params'
f_symbol = 'resnet-18-symbol.json'
bucket = '<<bucket-name>>'
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
# params
f_params_file = tempfile.NamedTemporaryFile()
s3_client.download_file(bucket, f_params, f_params_file.name)
f_params_file.flush()
# symbol
f_symbol_file = tempfile.NamedTemporaryFile()
s3_client.download_file(bucket, f_symbol, f_symbol_file.name)
f_symbol_file.flush()
def load_model(s_fname, p_fname):
"""
Load model checkpoint from file.
:return: (arg_params, aux_params)
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
"""
symbol = mx.symbol.load(s_fname)
save_dict = mx.nd.load(p_fname)
arg_params = {}
aux_params = {}
for k, v in save_dict.items():
tp, name = k.split(':', 1)
if tp == 'arg':
arg_params[name] = v
if tp == 'aux':
aux_params[name] = v
return symbol, arg_params, aux_params
def predict(url, mod, synsets):
'''
predict labels for a given image
'''
req = urllib2.urlopen(url)
img_file = tempfile.NamedTemporaryFile()
img_file.write(req.read())
img_file.flush()
img = Image.open(img_file.name)
# PIL conversion
# size = 224, 224
# img = img.resize((224, 224), Image.ANTIALIAS)
# center crop and resize
# ** width, height must be greater than new_width, new_height
new_width, new_height = 224, 224
width, height = img.size # Get dimensions
left = (width - new_width)/2
top = (height - new_height)/2
right = (width + new_width)/2
bottom = (height + new_height)/2
img = img.crop((left, top, right, bottom))
# convert to numpy.ndarray
sample = np.asarray(img)
# swap axes to make image from (224, 224, 3) to (3, 224, 224)
sample = np.swapaxes(sample, 0, 2)
img = | np.swapaxes(sample, 1, 2) | numpy.swapaxes |
import numpy as np
import itertools as it
import pandas as pd
def ilogit(x):
return 1/(1+np.exp(-x))
def get_network(politics, p):
N = politics.shape[0]
leaning_order = np.argsort(politics).argsort() / ((N - 1) / 5.0)
side = 1.0 * (leaning_order + np.random.normal(0, 0.0000001)
> 2.5) # Indicates on left side
network = np.repeat(side, N).reshape((N, N))
network[network == 0] = 1 - p
network[network == 1] = p
network[:, side == 0] = 1 - network[:, side == 0]
probabilities = network / np.sum(network, axis=1)
network[:, :] = 0
n_connections = np.round(np.random.uniform(1, 15, N) / 2).astype('int')
initiating_node = np.repeat(np.arange(N),n_connections)
initiating_node_size = np.repeat(side, n_connections)
left_connections = np.random.choice( | np.arange(N) | numpy.arange |
#!/usr/bin/env python
import numpy as np
from scipy.spatial import Delaunay
from . import pg_utilities
from . import imports_and_exports
"""
.. module:: generate_shapes
   :synopsis: Contains code to generate placental shapes for generic placental models
      (i.e. from literature measures without specific data from an individual).
"""
def equispaced_data_in_ellipsoid(n, volume, thickness, ellipticity):
"""
:Function name: **equispaced_data_in_ellipsoid**
Generates equally spaced data points in an ellipsoid.
:inputs:
- n: Number of data points which we aim to generate
- volume: Volume of ellipsoid
- thickness: Placental thickness (z-dimension)
- ellipticity: Ratio of y to x axis dimensions
    :return:
    - datapoints: A nx3 array of datapoints, with each point being defined by its x-,y-, and z- coordinates
A way you might want to use me is:
>>> n = 100
>>> volume = 10
>>> thickness = 3
>>> ellipticity = 1.1
>>> equispaced_data_in_ellipsoid(n, volume, thickness, ellipticity)
This will return 100 data points in an ellipse with z-axis thickness 3, volume 10, and with the y-axis dimension 1.1 times the x-axis dimension.
"""
data_spacing = (volume / n) ** (1.0 / 3.0)
print('Generating data ' + str(data_spacing) + ' apart')
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
    # Aiming to generate seed points that fill a cuboid encompassing the placental volume, then remove seed points that
# are external to the ellipsoid
num_data = 0 # zero the total number of data points
# Calculate the number of points that should lie in each dimension in a cube
nd_x = np.floor(2.0 * (x_radius + data_spacing) / data_spacing)
nd_y = np.floor(2.0 * (y_radius + data_spacing) / data_spacing)
nd_z = np.floor(2.0 * (z_radius + data_spacing) / data_spacing)
nd_x = int(nd_x)
nd_y = int(nd_y)
nd_z = int(nd_z)
# Set up edge node coordinates
x_coord = np.linspace(-x_radius - data_spacing / 2.0, x_radius + data_spacing / 2.0, nd_x)
y_coord = np.linspace(-y_radius - data_spacing / 2.0, y_radius + data_spacing / 2.0, nd_y)
z_coord = np.linspace(-z_radius - data_spacing / 2.0, z_radius + data_spacing / 2.0, nd_z)
    # Use these vectors to form a uniformly spaced grid
data_coords = np.vstack(np.meshgrid(x_coord, y_coord, z_coord)).reshape(3, -1).T
# Store nodes that lie within ellipsoid
datapoints = np.zeros((nd_x * nd_y * nd_z, 3))
for i in range(len(data_coords)): # Loop through grid
coord_check = pg_utilities.check_in_ellipsoid(data_coords[i][0], data_coords[i][1], data_coords[i][2], x_radius,
y_radius, z_radius)
if coord_check is True: # Has to be strictly in the ellipsoid
datapoints[num_data, :] = data_coords[i, :] # add to data array
num_data = num_data + 1
datapoints.resize(num_data, 3,refcheck=False) # resize data array to correct size
print('Data points within ellipsoid allocated. Total = ' + str(len(datapoints)))
return datapoints
def uniform_data_on_ellipsoid(n, volume, thickness, ellipticity, random_seed):
"""
:Function name: **uniform_data_on_ellipsoid**
Generates equally spaced data points on the positive z-surface of an ellipsoid
:inputs:
- n: number of data points which we aim to generate
- volume: volume of ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimensions
:return:
- chorion_data: A nx3 array of datapoints, with each point being defined by its x-,y-, and z- coordinates
A way you might want to use me is:
>>> n = 100
>>> volume = 10
>>> thickness = 3
>>> ellipticity = 1.1
    >>> random_seed = 0
    >>> uniform_data_on_ellipsoid(n, volume, thickness, ellipticity, random_seed)
    This will return 100 data points on the positive z-surface of an ellipsoid with z-axis thickness 3, volume 10,
and with the y-axis dimension 1.1 times the x-axis dimension.
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
area_estimate = np.pi * x_radius * y_radius
data_spacing = 0.85 * np.sqrt(area_estimate / n)
chorion_data = np.zeros((n, 3))
np.random.seed(random_seed)
generated_seed = 0
acceptable_attempts = n * 1000 # try not to have too many failures
attempts = 0
while generated_seed < n and attempts < acceptable_attempts:
# generate random x-y coordinates between negative and positive radii
new_x = np.random.uniform(-x_radius, x_radius)
new_y = np.random.uniform(-y_radius, y_radius)
# check if new coordinate is on the ellipse
        if ((new_x / x_radius) ** 2 + (new_y / y_radius) ** 2) < 1:  # within the ellipse footprint, so a surface z exists
if generated_seed == 0:
generated_seed = generated_seed + 1
new_z = pg_utilities.z_from_xy(new_x, new_y, x_radius, y_radius, z_radius)
chorion_data[generated_seed - 1][:] = [new_x, new_y, new_z]
else:
reject = False
for j in range(0, generated_seed + 1):
distance = (chorion_data[j - 1][0] - new_x) ** 2 + (chorion_data[j - 1][1] - new_y) ** 2
distance = np.sqrt(distance)
if distance <= data_spacing:
reject = True
break
if reject is False:
generated_seed = generated_seed + 1
new_z = pg_utilities.z_from_xy(new_x, new_y, x_radius, y_radius, z_radius)
chorion_data[generated_seed - 1][:] = [new_x, new_y, new_z]
attempts = attempts + 1
    chorion_data.resize(generated_seed, 3, refcheck=False)  # resize data array to correct size
print('Data points on ellipsoid allocated. Total = ' + str(len(chorion_data)) )
return chorion_data
def gen_rect_cover_ellipsoid(volume, thickness, ellipticity, x_spacing, y_spacing, z_spacing):
# Generates equally spaced data nodes and elements and constructs a rectangular 'mesh' that covers the space that is
# made up of an ellipsoidal placenta
# volume=volume of ellipsoid
# thickness = placental thickness (z-dimension)
# ellipticity = ratio of y to x axis dimensions
# X,Y,Z spacing is the number of elements required in each of the x, y z directions
# Calculate the dimensions of the ellipsoid
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
# z height of ellipsoid is 2* zradius
    # We want the number of nodes to cover the height and have the prescribed spacing
nnod_x = int(np.ceil(x_radius * 2.0 / x_spacing)) + 1
x_width = x_spacing * (nnod_x - 1)
nnod_y = int(np.ceil(y_radius * 2.0 / y_spacing)) + 1
y_width = y_spacing * (nnod_y - 1)
nnod_z = int(np.ceil(z_radius * 2.0 / z_spacing)) + 1
z_width = z_spacing * (nnod_z - 1)
node_loc = gen_rectangular_node(x_width, y_width, z_width, nnod_x, nnod_y, nnod_z)
# Generating the element connectivity of each cube element, 8 nodes for each 3D cube element
elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z)
return {'nodes': node_loc, 'elems': elems, 'total_nodes': nnod_x * nnod_y * nnod_z,
'total_elems': (nnod_x - 1) * (nnod_y - 1) * (nnod_z - 1)}
def gen_ellip_mesh_tet(volume, thickness, ellipticity, n):
""" Generates ellipsoid tetrahedral mesh for 3D problems
Inputs:
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimensions
- n: number of datapoints generated to create the mesh
Returns:
- nodes: nodes location of mesh
- elems: element connectivity of mesh (tetrahedral element)
- node_array: array of nodes
- element_array: array of elements
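    A way you might want to use me is (illustrative values only):
    >>> volume = 10
    >>> thickness = 3
    >>> ellipticity = 1.1
    >>> n = 1000
    >>> mesh = gen_ellip_mesh_tet(volume, thickness, ellipticity, n)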
"""
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
nodeSpacing = (n / (2 * x_radius * 2 * y_radius * 2 * z_radius)) ** (1. / 3)
nnod_x = 2 * x_radius * nodeSpacing
nnod_y = 2 * y_radius * nodeSpacing
nnod_z = 2 * z_radius * nodeSpacing
nodes = gen_rectangular_node(x_radius * 2, y_radius * 2, z_radius * 2, nnod_x, nnod_y, nnod_z)
# nodes inside the ellipsoid
ellipsoid_node = np.zeros((len(nodes), 3))
count = 0
for nnode in range(0, len(nodes)):
coord_point = nodes[nnode][0:3]
inside = pg_utilities.check_in_on_ellipsoid(coord_point[0], coord_point[1], coord_point[2], x_radius, y_radius,
z_radius)
if inside:
ellipsoid_node[count, :] = coord_point[:]
count = count + 1
ellipsoid_node.resize(count, 3,refcheck=False)
xyList = ellipsoid_node[:, [0, 1]]
xyListUnique = np.vstack({tuple(row) for row in xyList})
# looking for z_coordinate of surface nodes
for xyColumn in xyListUnique:
xyNodes = np.where(np.all(xyList == xyColumn, axis=1))[0]
if len(xyNodes) > 1:
x_coord = ellipsoid_node[xyNodes[0], 0]
y_coord = ellipsoid_node[xyNodes[0], 1]
ellipsoid_node[xyNodes[len(xyNodes) - 1], 2] = pg_utilities.z_from_xy(x_coord, y_coord, x_radius, y_radius,
z_radius)
ellipsoid_node[xyNodes[0], 2] = -1 * (
pg_utilities.z_from_xy(x_coord, y_coord, x_radius, y_radius, z_radius))
# generate tetrahedral mesh
pyMesh = Delaunay(ellipsoid_node)
# Build arrays to pass into openCMISS conversion:
node_loc = pyMesh.points
temp_elems = pyMesh.simplices
# CHECK ELEMENTS FOR 0 VOLUME:
min_vol = 0.00001
index = 0
indexArr = []
for element in temp_elems:
x_coor = []
y_coor = []
z_coor = []
for node in element:
x_coor.append(node_loc[node][0])
y_coor.append(node_loc[node][1])
z_coor.append(node_loc[node][2])
vmat = np.vstack((x_coor, y_coor, z_coor, [1.0, 1.0, 1.0, 1.0])) # matrix of coor of element
elem_volume = (1 / 6.0) * abs(np.linalg.det(vmat)) # volume of each tetrahedral element
# if volume is not zero
if elem_volume > min_vol:
indexArr.append(index)
index = index + 1
# update arrays without 0 volume elements, to pass into openCMISS
elems = temp_elems[indexArr, :]
for i in range(len(elems)):
elems[i] = [x + 1 for x in elems[i]]
element_array = range(1, len(elems) + 1)
node_array = range(1, len(node_loc) + 1)
return {'nodes': node_loc, 'elems': elems, 'element_array': element_array, 'node_array': node_array,
'nodeSpacing': nodeSpacing}
def gen_rectangular_node(x_width, y_width, z_width, nnod_x, nnod_y, nnod_z):
# Create linspaces for x y and z coordinates
x = np.linspace(-x_width / 2.0, x_width / 2.0, int(nnod_x)) # linspace for x axis
y = np.linspace(-y_width / 2.0, y_width / 2.0, int(nnod_y)) # linspace for y axis
z = np.linspace(-z_width / 2.0, z_width / 2.0, int(nnod_z)) # linspace for z axis
node_loc_temp = np.vstack(np.meshgrid(y, z, x)).reshape(3, -1).T # generate nodes for rectangular mesh
node_loc = np.zeros((len(node_loc_temp), 3))
for i in range(0, len(node_loc)):
node_loc[i][0] = node_loc_temp[i][2]
node_loc[i][1] = node_loc_temp[i][0]
node_loc[i][2] = node_loc_temp[i][1]
return node_loc
def gen_rectangular_mesh2(nel_x, nel_y, nel_z, xdim, ydim, zdim, element_type):
    # generates a rectangular mesh of defined dimensions using either linear or quadratic elements
if element_type == 1: # linear element
nnod_x = int(nel_x + 1)
nnod_y = int(nel_y + 1)
nnod_z = int(nel_z + 1)
elif element_type == 2: # quadratic element
nnod_x = int((nel_x * 2) + 1)
nnod_y = int((nel_y * 2) + 1)
nnod_z = int((nel_z * 2) + 1)
node = gen_rectangular_node(xdim, ydim, zdim, nnod_x, nnod_y, nnod_z) # getting nodes
if element_type == 1: # linear element
elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z) # getting elem connectivity
elif element_type == 2: # quadratic element
elems = cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y,
nnod_z) # getting element connectivity
element_array = range(1, len(elems) + 1)
node_array = range(1, len(node) + 1)
if element_type == 2:
surfacenodes = identify_surface_node_quad(nel_x, nel_y, nel_z)
else:
print("This element type has no implemented surface node definition")
surfacenodes = 0
return {'nodes': node, 'elems': elems, 'element_array': element_array,
'node_array': node_array, 'surface_nodes': surfacenodes}
def gen_3d_ellipsoid(nel_x, nel_y, nel_z, volume, thickness, ellipticity, element_type):
""" Generates ellipsoid placental mesh to solve 3D problems (note this is not a quality structured mesh)
Inputs:
- nel: number of element in x,y,z axis , the more nel, the rounder the mesh
- volume: volume of placental ellipsoid
- thickness: placental thickness (z-dimension)
- ellipticity: ratio of y to x axis dimensions
Returns:
- placental_node_coor: nodes location of mesh
- placental_el_con: element connectivity of mesh (tetrahedral element)
- node_array: array of nodes
- element_array: array of elements
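    A way you might want to use me is (illustrative values only):
    >>> nel_x = nel_y = nel_z = 4
    >>> gen_3d_ellipsoid(nel_x, nel_y, nel_z, 10, 3, 1.1, 2)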
"""
# creating cube between -1 and 1 with n number of element
# cubelength=2
if element_type == 1: # linear element
nnod_x = int(nel_x + 1)
nnod_y = int(nel_y + 1)
nnod_z = int(nel_z + 1)
elif element_type == 2: # quadratic element
nnod_x = int((nel_x * 2) + 1)
nnod_y = int((nel_y * 2) + 1)
nnod_z = int((nel_z * 2) + 1)
radii = pg_utilities.calculate_ellipse_radii(volume, thickness, ellipticity)
z_radius = radii['z_radius']
x_radius = radii['x_radius']
y_radius = radii['y_radius']
cube_node = gen_rectangular_node(2 * x_radius, 2 * y_radius, 2 * z_radius, nnod_x, nnod_y, nnod_z)
if element_type == 1: # linear element
cube_elems = cube_mesh_connectivity(nnod_x, nnod_y, nnod_z) # getting elem connectivity
elif element_type == 2: # quadratic element
cube_elems = cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y,
nnod_z) # getting element connectivity
ellipsoid_coor = np.zeros((len(cube_node), 3))
for ii in range(0, len(cube_node)):
ellipsoid_coor[ii, 0] = cube_node[ii, 0] * np.sqrt(1.0 - cube_node[ii, 1] ** 2 / (2.0 * y_radius ** 2) -
cube_node[ii, 2] ** 2 / (2.0 * z_radius ** 2) + cube_node[
ii, 1] ** 2 *
cube_node[ii, 2] ** 2 / (
3.0 * y_radius ** 2 * z_radius ** 2)) # for x_coor
ellipsoid_coor[ii, 1] = cube_node[ii, 1] * np.sqrt(1.0 - cube_node[ii, 0] ** 2 / (2.0 * x_radius ** 2) -
cube_node[ii, 2] ** 2 / (2.0 * z_radius ** 2) + cube_node[
ii, 0] ** 2 * cube_node[ii, 2] ** 2
/ (3.0 * x_radius ** 2 * z_radius ** 2)) # for y_coor
ellipsoid_coor[ii, 2] = cube_node[ii, 2] * np.sqrt(1.0 - cube_node[ii, 1] ** 2 / (2.0 * y_radius ** 2) -
cube_node[ii, 0] ** 2 / (2.0 * x_radius ** 2) + cube_node[
ii, 1] ** 2 * cube_node[ii, 0] ** 2
/ (3.0 * y_radius ** 2 * x_radius ** 2)) # for z_coor
element_array = range(1, len(cube_elems) + 1)
node_array = range(1, len(ellipsoid_coor) + 1)
if element_type == 2:
surfacenodes = identify_surface_node_quad(nel_x, nel_y, nel_z)
else:
print("This element type has no implemented surface node definition")
surfacenodes = 0
return {'placental_node_coor': ellipsoid_coor, 'placental_el_con': cube_elems, 'element_array': element_array,
'node_array': node_array, 'surface_nodes': surfacenodes}
def cube_mesh_connectivity(nnod_x, nnod_y, nnod_z):
"""Generates element connectivity in cube mesh
Inputs:
- nnod_x:number of node in x axis
- nnod_y:number of node in y axis
- nnod_z:number of node in z axis
Outputs:
- elems: array of element connectivity
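    A way you might want to use me is (illustrative values only):
    >>> elems = cube_mesh_connectivity(3, 3, 3)  # a 2x2x2-element cube; each row is [element number, 8 node indices]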
"""
num_elems = (nnod_x - 1) * (nnod_y - 1) * (nnod_z - 1)
elems = np.zeros((num_elems, 9),
dtype=int) # this stores first element number and then the nodes of each mesh element
element_number = 0
ne = 0
# loop through elements
for k in range(1, nnod_z):
for j in range(1, nnod_y):
for i in range(1, nnod_x):
elems[ne][0] = ne # store element number
elems[ne][1] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) # lowest coordinates
elems[ne][2] = elems[ne][1] + 1 # add one in x
elems[ne][3] = elems[ne][1] + nnod_x # go through x and find first in y
elems[ne][4] = elems[ne][3] + 1 # add one in y
elems[ne][5] = elems[ne][1] + nnod_x * nnod_y # same as 1 -4 but at higher z -coord
elems[ne][6] = elems[ne][2] + nnod_x * nnod_y
elems[ne][7] = elems[ne][3] + nnod_x * nnod_y
elems[ne][8] = elems[ne][4] + nnod_x * nnod_y
ne = ne + 1
return elems
def cube_mesh_connectivity_quadratic(nel_x, nel_y, nel_z, nnod_x, nnod_y, nnod_z):
"""Generates element connectivity in quadratic cube mesh
Inputs:
- nnod_x:number of node in x axis
- nnod_y:number of node in y axis
- nnod_z:number of node in z axis
Outputs:
- elems: array of element connectivity in quadratic
"""
num_elems = nel_x * nel_y * nel_z
elems = np.zeros((num_elems, 28), dtype=int)
element_number = 0
ne = 0
# Got the element
for k in range(1, nnod_z, 2):
for j in range(1, nnod_y, 2):
for i in range(1, nnod_x, 2):
# 1st layer
elems[ne][0] = ne
elems[ne][1] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) # 1st node
elems[ne][2] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) + 1 # right subsequent node
elems[ne][3] = (i - 1) + (nnod_x) * (j - 1) + nnod_x * nnod_y * (k - 1) + 2 # right subsequent node
elems[ne][4] = elems[ne][1] + nnod_x # 1st node in another y layer
elems[ne][5] = elems[ne][1] + nnod_x + 1 # right subsequent node
elems[ne][6] = elems[ne][1] + nnod_x + 2 # right subsequent node
elems[ne][7] = elems[ne][1] + 2 * (nnod_x) # 1st node in another y layer
elems[ne][8] = elems[ne][1] + 2 * (nnod_x) + 1 # right subsequent node
elems[ne][9] = elems[ne][1] + 2 * (nnod_x) + 2 # right subsequent node
# 2nd layer
elems[ne][10] = elems[ne][1] + nnod_x * nnod_y # same in one z layer
elems[ne][11] = elems[ne][2] + nnod_x * nnod_y
elems[ne][12] = elems[ne][3] + nnod_x * nnod_y
elems[ne][13] = elems[ne][4] + nnod_x * nnod_y
elems[ne][14] = elems[ne][5] + nnod_x * nnod_y
elems[ne][15] = elems[ne][6] + nnod_x * nnod_y
elems[ne][16] = elems[ne][7] + nnod_x * nnod_y
elems[ne][17] = elems[ne][8] + nnod_x * nnod_y
elems[ne][18] = elems[ne][9] + nnod_x * nnod_y
                # third layer
elems[ne][19] = elems[ne][1] + nnod_x * nnod_y * 2 # same in another z layer
elems[ne][20] = elems[ne][2] + nnod_x * nnod_y * 2
elems[ne][21] = elems[ne][3] + nnod_x * nnod_y * 2
elems[ne][22] = elems[ne][4] + nnod_x * nnod_y * 2
elems[ne][23] = elems[ne][5] + nnod_x * nnod_y * 2
elems[ne][24] = elems[ne][6] + nnod_x * nnod_y * 2
elems[ne][25] = elems[ne][7] + nnod_x * nnod_y * 2
elems[ne][26] = elems[ne][8] + nnod_x * nnod_y * 2
elems[ne][27] = elems[ne][9] + nnod_x * nnod_y * 2
ne = ne + 1
return elems
def identify_surface_node_quad(nel_x, nel_y, nel_z):
    """Generates the collection of nodes that are on the surface of a quadratic placental mesh
Inputs:
- nel_x:number of elem in x axis
- nel_y:number of elem in y axis
- nel_z:number of elem in z axis
Outputs:
- surfacenode: collection of nodes on the surface of placental mesh
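    A way you might want to use me is (illustrative values only):
    >>> surface_nodes = identify_surface_node_quad(2, 2, 2)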
"""
nnod_x = int((nel_x * 2) + 1) # number of nodes in x axis
nnod_y = int((nel_y * 2) + 1) # number of nodes in y axis
nnod_z = int((nel_z * 2) + 1) # number of nodes in z axis
# For left and right surface
    sIEN = np.zeros((9, nel_y * nel_z), dtype=int)  # to store surface individual element nodes (sIEN)
e = 0
for k in range(1, nnod_x * nnod_y * (nnod_z - 1), (nnod_x * nnod_y) * 2): # go up
for j in range(1, nnod_x * (nnod_y - 1), 2 * nnod_x): # go left
sIEN[0, e] = j + (k - 1) # 1st node
sIEN[1, e] = sIEN[0, e] + (nnod_x) * (nnod_y) # 2nd node
sIEN[2, e] = sIEN[1, e] + (nnod_x) * (nnod_y) # 3rd node
sIEN[3, e] = sIEN[0, e] + nnod_x # 4th node
sIEN[4, e] = sIEN[1, e] + nnod_x # 5th node
sIEN[5, e] = sIEN[2, e] + nnod_x # 6th node
sIEN[6, e] = sIEN[3, e] + nnod_x # 7th node
sIEN[7, e] = sIEN[4, e] + nnod_x # 8th node
sIEN[8, e] = sIEN[5, e] + nnod_x # 9th node
e = e + 1
left = np.unique(sIEN) # collection of nodes of left surface
right = np.unique(sIEN.T + (nnod_x - 1)) # collection of nodes on right surface
# For front and back surface
    sIEN = np.zeros((9, nel_x * nel_z), dtype=int)
from __future__ import print_function, absolute_import, division
import sys
import numpy
from numba import config, unittest_support as unittest
from numba.npyufunc.ufuncbuilder import GUFuncBuilder
from numba import vectorize, guvectorize
from numba.npyufunc import PyUFunc_One
from numba.npyufunc.dufunc import DUFunc as UFuncBuilder
from ..support import tag
def add(a, b):
"""An addition"""
return a + b
def equals(a, b):
return a == b
def mul(a, b):
"""A multiplication"""
return a * b
def guadd(a, b, c):
"""A generalized addition"""
x, y = c.shape
for i in range(x):
for j in range(y):
c[i, j] = a[i, j] + b[i, j]
@vectorize(nopython=True)
def inner(a, b):
return a + b
@vectorize(["int64(int64, int64)"], nopython=True)
def inner_explicit(a, b):
return a + b
def outer(a, b):
return inner(a, b)
def outer_explicit(a, b):
return inner_explicit(a, b)
class Dummy: pass
def guadd_obj(a, b, c):
Dummy() # to force object mode
x, y = c.shape
for i in range(x):
for j in range(y):
c[i, j] = a[i, j] + b[i, j]
class MyException(Exception):
pass
def guerror(a, b, c):
raise MyException
class TestUfuncBuilding(unittest.TestCase):
@tag('important')
def test_basic_ufunc(self):
ufb = UFuncBuilder(add)
cres = ufb.add("int32(int32, int32)")
self.assertFalse(cres.objectmode)
cres = ufb.add("int64(int64, int64)")
self.assertFalse(cres.objectmode)
ufunc = ufb.build_ufunc()
def check(a):
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
self.assertEqual(b.dtype, a.dtype)
a = numpy.arange(12, dtype='int32')
check(a)
# Non-contiguous dimension
a = a[::2]
check(a)
a = a.reshape((2, 3))
check(a)
# Metadata
self.assertEqual(ufunc.__name__, "add")
self.assertIn("An addition", ufunc.__doc__)
def test_ufunc_struct(self):
ufb = UFuncBuilder(add)
cres = ufb.add("complex64(complex64, complex64)")
self.assertFalse(cres.objectmode)
ufunc = ufb.build_ufunc()
def check(a):
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
self.assertEqual(b.dtype, a.dtype)
a = numpy.arange(12, dtype='complex64') + 1j
check(a)
# Non-contiguous dimension
a = a[::2]
check(a)
a = a.reshape((2, 3))
check(a)
def test_ufunc_forceobj(self):
ufb = UFuncBuilder(add, targetoptions={'forceobj': True})
cres = ufb.add("int32(int32, int32)")
self.assertTrue(cres.objectmode)
ufunc = ufb.build_ufunc()
a = numpy.arange(10, dtype='int32')
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
def test_nested_call(self):
"""
Check nested call to an implicitly-typed ufunc.
"""
builder = UFuncBuilder(outer,
targetoptions={'nopython': True})
builder.add("(int64, int64)")
ufunc = builder.build_ufunc()
self.assertEqual(ufunc(-1, 3), 2)
def test_nested_call_explicit(self):
"""
Check nested call to an explicitly-typed ufunc.
"""
builder = UFuncBuilder(outer_explicit,
targetoptions={'nopython': True})
builder.add("(int64, int64)")
ufunc = builder.build_ufunc()
self.assertEqual(ufunc(-1, 3), 2)
class TestUfuncBuildingJitDisabled(TestUfuncBuilding):
def setUp(self):
self.old_disable_jit = config.DISABLE_JIT
        config.DISABLE_JIT = True
def tearDown(self):
config.DISABLE_JIT = self.old_disable_jit
class TestGUfuncBuilding(unittest.TestCase):
def test_basic_gufunc(self):
gufb = GUFuncBuilder(guadd, "(x, y),(x, y)->(x, y)")
cres = gufb.add("void(int32[:,:], int32[:,:], int32[:,:])")
self.assertFalse(cres.objectmode)
ufunc = gufb.build_ufunc()
a = numpy.arange(10, dtype="int32").reshape(2, 5)
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
self.assertEqual(b.dtype, numpy.dtype('int32'))
# Metadata
self.assertEqual(ufunc.__name__, "guadd")
self.assertIn("A generalized addition", ufunc.__doc__)
@tag('important')
def test_gufunc_struct(self):
gufb = GUFuncBuilder(guadd, "(x, y),(x, y)->(x, y)")
cres = gufb.add("void(complex64[:,:], complex64[:,:], complex64[:,:])")
self.assertFalse(cres.objectmode)
ufunc = gufb.build_ufunc()
a = numpy.arange(10, dtype="complex64").reshape(2, 5) + 1j
b = ufunc(a, a)
self.assertTrue(numpy.all(a + a == b))
def test_gufunc_struct_forceobj(self):
gufb = GUFuncBuilder(guadd, "(x, y),(x, y)->(x, y)",
targetoptions=dict(forceobj=True))
cres = gufb.add("void(complex64[:,:], complex64[:,:], complex64[:,"
":])")
self.assertTrue(cres.objectmode)
ufunc = gufb.build_ufunc()
        a = numpy.arange(10, dtype="complex64")
import sys
from pathlib import Path
import numpy as np
from netCDF4 import Dataset
def decode_woa_var(varname):
'''take WOA variable name input and output relevant info'''
input_to_woa_param = dict(
T='t',
S='s',
O2='o',
O2sat='O',
NO3='n',
Si='i',
PO4='p',
)
input_to_woa_ftype = dict(
T='A5B7',
S='A5B7',
O2='all',
O2sat='all',
NO3='all',
Si='all',
PO4='all',
)
input_to_woa_folder = dict(
T='temperature',
S='salinity',
O2='oxygen',
O2sat='o2sat',
NO3='nitrate',
Si='silicate',
PO4='phosphate',
)
param = input_to_woa_param[varname]
ftype = input_to_woa_ftype[varname]
ftpdir = input_to_woa_folder[varname]
return param, ftype, ftpdir
def get_lat_index(lat, lat_bounds):
'''function to pull appropriate WOA latitude values'''
lat_ix = np.logical_and(lat >= lat_bounds[0], lat <= lat_bounds[1])
if not lat_ix.any():
lat_ix = np.where(np.abs(lat - np.mean(lat_bounds)) == np.min(np.abs(lat - np.mean(lat_bounds))))[0]
else:
lat_ix = np.where(lat_ix)[0]
if lat_ix[0] != 0:
lat_ix = np.append(np.array([lat_ix[0]-1]), lat_ix)
if lat_ix[-1] != lat.shape[0] - 1:
lat_ix = np.append(lat_ix, np.array([lat_ix[-1]+1]))
return lat_ix
def get_lon_index(lon, lon_bounds, cross180):
'''function to pull appropriate WOA longitude values, handles crossing 180'''
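    # Illustrative (assumed bounds): with cross180=True, bounds such as lon_bounds = (-170, 170)
    # keep longitudes <= -170 or >= 170, i.e. the region wrapping the dateline.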
if cross180:
lon_ix = np.logical_or(lon <= lon_bounds[0], lon >= lon_bounds[1])
lon_ix = np.where(lon_ix)[0]
diff_index = np.where(np.diff(lon_ix) != 1)[0]
if diff_index.shape[0] != 0:
diff_index = diff_index[0]
half1_lon_ix = np.append(lon_ix[:diff_index], np.array([lon_ix[diff_index]+1]))
half2_lon_ix = np.append(np.array([lon_ix[diff_index+1] - 1]), lon_ix[diff_index+1:])
lon_ix = np.append(half1_lon_ix, half2_lon_ix)
if lon_ix[0] != 0:
lon_ix = np.append(np.array([lon_ix[0]-1]), lon_ix)
if lon_ix[-1] != lon.shape[0] - 1:
lon_ix = np.append(lon_ix, np.array([lon_ix[-1]+1]))
else:
lon_ix = np.logical_and(lon >= lon_bounds[0], lon <= lon_bounds[1])
if not lon_ix.any():
lon_ix = np.where(np.abs(lon - np.mean(lon_bounds)) == np.min(np.abs(lon - np.mean(lon_bounds))))[0]
else:
lon_ix = np.where(lon_ix)[0]
if lon_ix[0] != 0:
lon_ix = np.append(np.array([lon_ix[0]-1]), lon_ix)
if lon_ix[-1] != lon.shape[0] - 1:
lon_ix = np.append(lon_ix, np.array([lon_ix[-1]+1]))
return lon_ix
def get_qctests(hex_code):
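    '''decode an Argo QC-test hex bitmask into the list of test numbers whose bits are set
    Illustrative example (hypothetical input):
    >>> get_qctests('4000')  # 0x4000 = 2**14, so only test 14 is flagged
    [14]
    '''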
# hex to numeric
num = int(hex_code, 16)
# list to save test number in
tests = []
for i in range(26,0,-1):
qc_binary_id = 2**i
if qc_binary_id <= num:
num -= qc_binary_id
tests.append(i)
if num != 0:
sys.stdout.write('NOTE: decoding QC tests left a non-zero remainder, suggest investigation\n')
return tests[::-1]
def display_qctests(QCP, QCF):
QCP_numbers = get_qctests(QCP)
QCF_numbers = get_qctests(QCF)
test_descriptions = [
'Platform Identification test\t\t\t ',
'Impossible Date test\t\t\t\t ',
'Impossible Location test\t\t\t ',
'Position on Land test\t\t\t\t ',
'Impossible Speed test\t\t\t\t ',
'Global Range test\t\t\t\t ',
'Regional Global Parameter test\t\t ',
'Pressure Increasing test\t\t\t ',
'Spike test\t\t\t\t\t ',
'Top and Bottom Spike test (obsolete)\t\t ',
'Gradient test\t\t\t\t\t ',
'Digit Rollover test\t\t\t\t ',
'Stuck Value test\t\t\t\t ',
'Density Inversion test\t\t\t ',
'Grey List test\t\t\t\t ',
'Gross Salinity or Temperature Sensor Drift test',
'Visual QC test\t\t\t\t ',
'Frozen profile test\t\t\t\t ',
'Deepest pressure test\t\t\t\t ',
'Questionable Argos position test\t\t ',
'Near-surface unpumped CTD salinity test\t ',
'Near-surface mixed air/water test\t\t '
]
sys.stdout.write('---------------------------------------------------------------------------\n')
sys.stdout.write('| Test\t| Pass/Fail\t| Test name\t\t\t\t\t |\n')
sys.stdout.write('---------------------------------------------------------------------------\n')
for i,t in enumerate(test_descriptions):
if i+1 in QCF_numbers:
pfn = 'Failed'
elif i+1 in QCP_numbers:
pfn = 'Passed'
else:
pfn = 'Not performed'
sys.stdout.write('| {:d}\t| {}\t| {} |\n'.format(i+1, pfn, t))
sys.stdout.write('---------------------------------------------------------------------------\n')
def utf_decode(nc_arr, verbose=True):
dlist = []
for row in nc_arr:
rval = ''
for let in row:
rval = rval + let.decode('UTF-8')
if verbose:
print(rval)
dlist.append(rval.strip())
return dlist
def read_gain_value(fn, verbose=True):
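    '''extract DOXY gain calibration info from an Argo BGC profile netCDF file
    Reads SCIENTIFIC_CALIB_EQUATION/COEFFICIENT/COMMENT (handling the four
    N_PROF x N_CALIB layouts below) and returns the coefficient, equation and
    comment entries that mention DOXY_ADJUSTED; placeholder values are
    returned if no gain information is found.
    '''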
nc = Dataset(fn, 'r')
eq = nc.variables['SCIENTIFIC_CALIB_EQUATION']
coeff = nc.variables['SCIENTIFIC_CALIB_COEFFICIENT']
comm = nc.variables['SCIENTIFIC_CALIB_COMMENT']
nprof = nc.dimensions['N_PROF'].size
ncalib = nc.dimensions['N_CALIB'].size
if nprof == 1 and ncalib == 1:
eqs = np.array(utf_decode(np.squeeze(eq[:].data), verbose=verbose)).flatten()
coeffs = np.array(utf_decode(np.squeeze(coeff[:].data), verbose=verbose)).flatten()
comms = np.array(utf_decode(np.squeeze(comm[:].data), verbose=verbose)).flatten()
ix = np.array(['DOXY_ADJUSTED' in s for s in eqs])
if np.sum(ix) == 0:
return np.array(['G=NaN']), np.array(['DOXY_ADJUSTED=DOXY*G']), np.array(['No gain value found'])
else:
G = coeffs[ix]
equation = eqs[ix]
comment = comms[ix]
elif nprof > 1 and ncalib == 1:
eqs = np.array([utf_decode(np.squeeze(eqq), verbose=verbose) for eqq in eq[:].data]).flatten()
coeffs = np.array([utf_decode(np.squeeze(cqq), verbose=verbose) for cqq in coeff[:].data]).flatten()
comms = np.array([utf_decode(np.squeeze(mqq), verbose=verbose) for mqq in comm[:].data]).flatten()
ix = np.array(['DOXY_ADJUSTED' in s for s in eqs])
if np.sum(ix) == 0:
return np.array(['G=NaN']), np.array(['DOXY_ADJUSTED=DOXY*G']), np.array(['No gain value found'])
else:
G = coeffs[ix]
equation = eqs[ix]
comment = comms[ix]
elif nprof == 1 and ncalib > 1:
eqs = np.array([utf_decode(np.squeeze(eqq), verbose=verbose) for eqq in np.squeeze(eq[:].data)]).flatten()
coeffs = np.array([utf_decode(np.squeeze(cqq), verbose=verbose) for cqq in np.squeeze(coeff[:].data)]).flatten()
comms = np.array([utf_decode(np.squeeze(mqq), verbose=verbose) for mqq in np.squeeze(comm[:].data)]).flatten()
ix = np.array(['DOXY_ADJUSTED' in s for s in eqs])
if np.sum(ix) == 0:
return np.array(['G=NaN']), np.array(['DOXY_ADJUSTED=DOXY*G']), np.array(['No gain value found'])
else:
G = coeffs[ix]
equation = eqs[ix]
comment = comms[ix]
elif nprof > 1 and ncalib > 1:
eqs = np.array([utf_decode(np.squeeze(eqq), verbose=verbose) for p1 in eq[:].data for eqq in p1]).flatten()
coeffs = np.array([utf_decode(np.squeeze(cqq), verbose=verbose) for p1 in coeff[:].data for cqq in p1]).flatten()
comms = np.array([utf_decode(np.squeeze(mqq), verbose=verbose) for p1 in comm[:].data for mqq in p1]).flatten()
ix = np.array(['DOXY_ADJUSTED' in s for s in eqs])
if np.sum(ix) == 0:
            return np.array(['G=NaN']), np.array(['DOXY_ADJUSTED=DOXY*G']), np.array(['No gain value found'])
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch as th
from gym import spaces
from stable_baselines3.common.buffers import BaseBuffer
from stable_baselines3.common.preprocessing import get_obs_shape
from stable_baselines3.common.type_aliases import EpisodicRolloutBufferSamples, ReplayBufferSamples, RolloutBufferSamples
from stable_baselines3.common.vec_env import VecNormalize
class EpisodicBuffer(BaseBuffer):
"""
Episodic buffer used in on-policy PG algorithms like REINFORCE
It corresponds to episodes collected using the current policy.
This experience will be discarded after the policy update.
In order to use PPO objective, we also store the current value of each state
and the log probability of each taken action.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
Hence, it is only involved in policy and value function training but not action selection.
:param observation_space: Observation space
:param action_space: Action space
:param device: cpu or gpu
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
Equivalent to classic advantage when set to 1.
:param gamma: Discount factor
:param n_envs: Number of parallel environments
:param n_steps: N of N-step return
:param nb_rollouts: Number of rollouts to fill the buffer
:param max_episode_steps: Maximum length of an episode
"""
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
gae_lambda: float = 1,
gamma: float = 0.99,
n_envs: int = 1,
n_steps: int = 5,
beta: float = 1.0,
nb_rollouts: int = 1,
max_episode_steps: int = 1,
verbose=False,
):
if verbose:
print("nb rollouts:", nb_rollouts)
print("max episode length:", max_episode_steps)
buffer_size = nb_rollouts * max_episode_steps
super(EpisodicBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
self.gae_lambda = gae_lambda
self.n_steps = n_steps
self.gamma = gamma
self.beta = beta
# maximum steps in episode
self.max_episode_steps = max_episode_steps
self.current_idx = 0
self.episode_idx = 0
# Counter to prevent overflow
self.episode_steps = 0
self.nb_rollouts = nb_rollouts
# buffer with episodes
# number of episodes which can be stored until buffer size is reached
# self.nb_rollouts = self.buffer_size // self.max_episode_steps
self.current_idx = 0
# Counter to prevent overflow
self.episode_steps = 0
# Get shape of observation and goal (usually the same)
self.obs_shape = get_obs_shape(self.observation_space)
print(self.obs_shape)
# episode length storage, needed for episodes which has less steps than the maximum length
self.episode_lengths = np.zeros(self.nb_rollouts, dtype=np.int64)
assert self.n_envs == 1, "Episodic buffer only supports single env for now"
self.reset()
def add(
self,
obs: Dict[str, np.ndarray],
action: np.ndarray,
value: np.ndarray,
reward: np.ndarray,
episode_start: np.ndarray,
done: np.ndarray,
infos: List[Dict[str, Any]],
) -> None:
self._buffer["observation"][self.episode_idx, self.current_idx] = obs
self._buffer["action"][self.episode_idx, self.current_idx] = action
self.values[self.episode_idx, self.current_idx] = value
self.rewards[self.episode_idx, self.current_idx] = reward
self.episode_starts[self.episode_idx, self.current_idx] = episode_start
self.dones[self.episode_idx, self.current_idx] = done
# update current pointer
self.current_idx += 1
self.episode_steps += 1
if done or self.episode_steps >= self.max_episode_steps:
self.store_episode()
self.episode_steps = 0
def get_all_indices(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Retrieve all samples valid indices, taking episode length
into account.
"""
all_episodes = np.concatenate([np.ones(ep_len) * ep_idx for ep_idx, ep_len in enumerate(self.episode_lengths)])
all_transitions = np.concatenate([np.arange(ep_len) for ep_len in self.episode_lengths])
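        # e.g. episode_lengths = [3, 2] -> all_episodes = [0, 0, 0, 1, 1], all_transitions = [0, 1, 2, 0, 1]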
return all_episodes.astype(np.uint64), all_transitions.astype(np.uint64)
def get_samples(self) -> EpisodicRolloutBufferSamples:
total_steps = sum(self.episode_lengths)
all_indices = self.get_all_indices()
# Retrieve all transition and flatten the arrays
return EpisodicRolloutBufferSamples(
self.to_torch(self._buffer["observation"][all_indices].reshape(total_steps, *self.obs_shape)),
self.to_torch(self._buffer["action"][all_indices].reshape(total_steps, self.action_dim)),
self.to_torch(self.policy_returns[all_indices].reshape(total_steps)),
self.to_torch(self.target_values[all_indices].reshape(total_steps)),
)
def _get_samples(
self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
) -> Union[ReplayBufferSamples, RolloutBufferSamples]:
"""
:param batch_inds:
:param env:
:return:
"""
raise NotImplementedError()
def store_episode(self) -> None:
"""
Increment episode counter
and reset current episode index.
"""
# add episode length to length storage
self.episode_lengths[self.episode_idx] = self.current_idx
self.episode_idx += 1
self.current_idx = 0
@property
def n_episodes_stored(self) -> int:
return self.episode_idx
def size(self) -> int:
"""
:return: The current number of transitions in the buffer.
"""
return int(np.sum(self.episode_lengths))
def reset(self) -> None:
"""
Reset the buffer.
"""
self.values = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
self.log_probs = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
self.episode_starts = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
self.dones = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
# input dimensions for buffer initialization
self.input_shape = {
"observation": (self.n_envs,) + self.obs_shape,
"action": (self.action_dim,),
}
self._buffer = {
key: np.zeros((self.nb_rollouts, self.max_episode_steps, *dim), dtype=np.float32)
for key, dim in self.input_shape.items()
}
        self.policy_returns = np.zeros((self.nb_rollouts, self.max_episode_steps), dtype=np.float32)
import numpy as np
import cv2 as cv
import skimage.draw as draw
def bytescaling(data, cmin=None, cmax=None, high=255, low=0):
"""
Converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255). If the input image already has
dtype uint8, no scaling is done.
:param data: 16-bit image data array
:param cmin: bias scaling of small values (def: data.min())
:param cmax: bias scaling of large values (def: data.max())
:param high: scale max value to high. (def: 255)
:param low: scale min value to low. (def: 0)
:return: 8-bit image data array
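    Illustrative example (hypothetical 16-bit input): with the default cmin/cmax,
    np.array([[0, 1000], [2000, 4000]], dtype=np.uint16) maps to [[0, 64], [128, 255]] as uint8.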
"""
if data.dtype == np.uint8:
return data
if high > 255:
high = 255
if low < 0:
low = 0
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(np.uint8)
def normalize_minmax(frame, min_value, max_value):
return (frame - min_value) / (max_value - min_value)
def clip_and_fill(frame, min_value, max_value, fill_value="uniform"):
nan_mask = np.isnan(frame)
if fill_value == "uniform":
fill_value = np.random.uniform(
min_value, max_value, size=np.sum(nan_mask))
elif fill_value == "normal":
mean = (min_value + max_value) / 2
std = (max_value - mean) / 4 # since 2 std = 98% of coverage
fill_value = np.random.normal(
mean, std, size=np.sum(nan_mask))
frame[nan_mask] = fill_value
clipped = np.clip(frame, min_value, max_value)
return clipped
def resize_to(image, dsize):
if image.shape[0] != dsize[0] or image.shape[1] != dsize[1]:
return cv.resize(image, dsize[::-1])
return image
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
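# Illustrative sanity check (hypothetical boxes): a box compared against an identical box
# gives IoU 1.0, and against a disjoint box gives IoU 0.0, e.g.
#   compute_iou(np.array([0, 0, 2, 2]), np.array([[0, 0, 2, 2], [5, 5, 6, 6]]), 4.0, np.array([4.0, 1.0]))
#   -> array([1., 0.])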
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
For better performance, pass the largest set first and the smaller second.
"""
# Areas of anchors and GT boxes
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i]
overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)
return overlaps
def compute_matchesv2(pred_boxes, gt_boxes, query_iou=0.5):
""" compute the matches given that the boxes are same class
Input:
pred_boxes [n_pred, (y1,x1,y2,x2)]
gt_boxes [n_gt, (y1,x1,y2,x2)]
iou_threshold float
"""
pred_match = np.zeros([pred_boxes.shape[0]], dtype=np.bool)
if pred_match.shape[0] == 0 or gt_boxes.shape[0] == 0:
return pred_match
# Compute IoU overlaps [pred_masks, gt_masks]
overlaps = compute_overlaps(pred_boxes, gt_boxes)
    darg = np.argsort(-overlaps, axis=1)
import numpy as np
import scipy as sp
def sim_state_eq( A, B, xi, U):
    """This function calculates the trajectory for the network given our model
    if there are no constraints, and the target state is unknown, using the
    control equation x(t+1) = Ax(t) + BU(t). x(t) is the state vector, A is
the adjacency matrix, U(t) is the time varying input as specified by the
user, and B selects the control set (stimulating electrodes)
Args:
A : NxN state matrix (numpy array), where N is the number of nodes in your
network (for example, a structural connectivity matrix
constructed from DTI). A should be stable to prevent
uncontrolled trajectories.
B : NxN input matrix (numpy array), where N is the number of nodes. B
selects where you want your input energy to be applied to.
For example, if B is the Identity matrix, then input energy
will be applied to all nodes in the network. If B is a
matrix of zeros, but B(1,1) = 1. then energy will only be
applied at the first node.
xi : Nx1 initial state (numpy array) of your system where N is the number of
nodes. xi MUST have N rows.
U : NxT matrix of Energy (numpy array), where N is the number of nodes
and T is the number of
time points. For example, if you want to simulate the
trajectory resulting from stimulation, U could have
log(StimFreq)*StimAmp*StimDur as every element. You can
also enter U's that vary with time
Returns:
x : x is the NxT trajectory (numpy array) that results from simulating
x(t+1) = Ax(t) + Bu(t) the equation with the parameters
above.
@author JStiso
June 2017
"""
# Simulate trajectory
T = np.size(U,1)
N = np.size(A,0)
# initialize x
x = np.zeros((N, T))
xt = xi
for t in range(T):
x[:,t] = np.reshape(xt, N) # annoying python 1d array thing
xt_1 = np.matmul(A,xt) + np.matmul(B,np.reshape(U[:,t],(N,1) ))# state equation
xt = xt_1
return x
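# Minimal usage sketch (toy values, not from the original authors): a stable 2-node
# system driven by a constant input at every node for 10 time steps.
#   A = 0.5 * np.eye(2); B = np.eye(2); xi = np.zeros((2, 1)); U = np.ones((2, 10))
#   x = sim_state_eq(A, B, xi, U)   # x.shape == (2, 10)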
def optimal_energy(A, T, B, x0, xf, rho, S):
"""This is a python adaptation of matlab code originally written by <NAME> and <NAME>
compute optimal inputs/trajectories for a system to transition between two states
<NAME> September 2017
Args:
A: (NxN numpy array) Structural connectivity matrix
B: (NxN numpy array) Input matrix: selects which nodes to put input into. Define
so there is a 1 on the diagonal of elements you want to add input to,
and 0 otherwise
S: (NxN numpy array) Selects nodes whose distance you want to constrain, Define so
that there is a 1 on the diagonal of elements you want to
constrain, and a zero otherwise
T: (float) Time horizon: how long you want to control for. Too large will give
large error, too short will not give enough time for control
rho: (float) weights energy and distance constraints. Small rho leads to larger
energy
Returns:
X_opt: (TxN numpy array)
The optimal trajectory through state space
U_opt: (TxN numpy array)
The optimal energy
n_err: (float)
the error associated with this calculation. Errors will be larger when B is not identity,
and when A is large. Large T and rho will also tend to increase the error
-------------- Change Log -------------
JStiso April 2018
Changed S to be an input, rather than something defined internally
<NAME> January 2021
Changed the forward propagation of states to matrix exponential to
avoid reliance on MATLAB toolboxes. Also changed definition of expanded
input U to save time by avoiding having to resize the matrix.
Also changed the initialization of U_opt for the same reason.
JStiso 2021
Translated to Python
"""
n = np.shape(A)[1]
# state vectors to float if they're bools
if type(x0[0]) == np.bool_:
x0 = x0.astype(float)
if type(xf[0]) == np.bool_:
xf = xf.astype(float)
Sbar = np.eye(n) - S
np.shape(np.dot(-B,B.T)/(2*rho))
Atilde = np.concatenate((np.concatenate((A, np.dot(-B,B.T)/(2*rho)), axis=1),
np.concatenate((-2*S, -A.T), axis=1)), axis=0)
M = sp.linalg.expm(Atilde*T)
M11 = M[0:n,0:n]
M12 = M[0:n,n:]
M21 = M[n:,0:n]
M22 = M[n:,n:]
N = np.linalg.solve(Atilde,(M-np.eye(np.shape(Atilde)[0])))
c = np.dot(np.dot(N,np.concatenate((np.zeros((n,n)),S),axis = 0)),2*xf)
c1 = c[0:n]
c2 = c[n:]
p0 = np.dot(np.linalg.pinv(np.concatenate((np.dot(S,M12),np.dot(Sbar,M22)), axis = 0)),
(-np.dot(np.concatenate((np.dot(S,M11),np.dot(Sbar,M21)),axis=0),x0) -
np.concatenate((np.dot(S,c1),np.dot(Sbar,c2)), axis=0) +
np.concatenate((np.dot(S,xf),np.zeros((n,1))), axis=0)))
n_err = np.linalg.norm(np.dot(np.concatenate((np.dot(S,M12),np.dot(Sbar,M22)), axis = 0),p0) -
(-np.dot(np.concatenate((np.dot(S,M11),np.dot(Sbar,M21)),axis=0),x0) -
np.concatenate((np.dot(S,c1),np.dot(Sbar,c2)), axis=0) +
np.concatenate((np.dot(S,xf),np.zeros((n,1))), axis=0))) # norm(error)
STEP = 0.001
t = np.arange(0,(T+STEP),STEP)
U = np.dot(np.ones((np.size(t),1)),2*xf.T)
# Discretize continuous-time input for convolution
Atilde_d = sp.linalg.expm(Atilde*STEP)
Btilde_d = np.linalg.solve(Atilde,
np.dot((Atilde_d-np.eye(2*n)),np.concatenate((np.zeros((n,n)),S), axis=0)))
# Propagate forward discretized model
xp = np.zeros((2*n,np.size(t)))
xp[:,0:1] = np.concatenate((x0,p0), axis=0)
for i in np.arange(1,np.size(t)):
xp[:,i] = np.dot(Atilde_d,xp[:,i-1]) + np.dot(Btilde_d,U[i-1,:].T)
xp = xp.T
U_opt = np.zeros((np.size(t),np.shape(B)[1]))
for i in range(np.size(t)):
        U_opt[i,:] = -(1/(2*rho))*np.dot(B.T,xp[i,n:].T)
# -*- coding: utf-8 -*-
# This scripts uses CACC net on a single image, compares result to ground truth and shows them side by side
import torch
import torchvision
import matplotlib.pyplot as plt
import matplotlib.cm as CM
import numpy as np
import argparse
from cannet import CANNet
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", default="shtech", type=str, help="Dataset to use. \"venice\" or \"shtech\"")
parser.add_argument("--root", default="D:/Alex/", type=str, help="Root dir of datasets (with trailing backslash)")
parser.add_argument("--dev", default="cpu", type=str, help="Use cpu or gpu? For gpu cuda is mandatory")
args = parser.parse_args()
dataset_prefdir = args.root
if args.dataset == "venice":
img_root= dataset_prefdir + 'venice/test_data/images/'
gt_dmap_root= dataset_prefdir + 'venice/test_data/ground_truth/'
img_name = '4895_000060'
model_param_path='./checkpoints/venice_epoch_991.pth'
data_mean = [0.531, 0.508, 0.474]
data_std = [0.193, 0.189, 0.176]
elif args.dataset == "shtech":
img_root= dataset_prefdir + 'ShanghaiTech/part_A_final/test_data/images/'
gt_dmap_root= dataset_prefdir + 'ShanghaiTech/part_A_final/test_data/ground_truth/'
img_name = 'IMG_32'
model_param_path='./checkpoints/cvpr2019_CAN_SHHA_353.pth'
data_mean = [0.409, 0.368, 0.359]
data_std = [0.286, 0.274, 0.276]
else:
raise Exception("Unknown dataset. Use \"venice\" or \"shtech\".")
img_path = img_root + img_name + '.jpg'
gt_path = gt_dmap_root + img_name + '.npy'
if args.dev == "cpu":
device=torch.device("cpu")
elif args.dev == "gpu":
device = torch.device("cuda")
torch.backends.cudnn.enabled = True # use cudnn?
else:
raise Exception("Unknown device. Use \"cpu\" or \"gpu\".")
model=CANNet().to(device)
model.load_state_dict(torch.load(model_param_path))
model.eval()
torch.set_grad_enabled(False)  # a bare torch.no_grad() call has no effect; disable autograd globally for inference
gt_dmap = np.load(gt_path)
img_orig = Image.open(img_path)
img = torchvision.transforms.ToTensor()(img_orig)
# -------- to remove once training is done with new norm -----------------------------
data_mean = [0.485, 0.456, 0.406]
data_std = [0.229, 0.224, 0.225]
# ------------------------------------------------------------------------------------
img = torchvision.transforms.functional.normalize(img, mean=data_mean, std=data_std)
img = img.unsqueeze(0).to(device)
# forward propagation
et_dmap=model(img).detach()
et_dmap=et_dmap.squeeze(0).squeeze(0).cpu().numpy()
fig, ax = plt.subplots(1,3, figsize=(15,4))
et = et_dmap.sum()
gt = gt_dmap.sum()
er = np.abs(et-gt)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Get basic statistics describing the database
# Compare a structure to a database
from tqdm.autonotebook import tqdm
import logging
from pymatgen import Structure
from pymatgen.analysis.graphs import StructureGraph
from pymatgen.analysis.local_env import JmolNN
from .utils import (
get_structure_list,
get_rmsd,
closest_index,
tanimoto_distance,
get_number_bins,
)
import random
from scipy.spatial import distance
from sklearn.linear_model import HuberRegressor
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.metrics.pairwise import euclidean_distances
from scipy.stats import (
pearsonr,
ks_2samp,
mannwhitneyu,
ttest_ind,
anderson_ksamp,
gmean,
kurtosis,
mode,
variation,
skew,
normaltest,
kruskal,
median_absolute_deviation,
)
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from scipy import ndimage
import concurrent.futures
from functools import partial
from numba import jit
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("RemoveDuplicates")
logger.setLevel(logging.DEBUG)
# ToDo (maybe) make sure that input data is numeric?
# Todo: grid search for kernel width in MMD test
class Statistics:
def __init__(self):
pass
@staticmethod
def _get_one_graph_comparison(
structure_list_a: list, structure_list_b: list, _
) -> float:
"""
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
_:
Returns:
Jaccard distance between two random structure graphs
"""
logger.debug("i am in the graph comparison routine")
try:
random_selection_1 = random.sample(structure_list_a, 1)[0]
random_selection_2 = random.sample(structure_list_b, 1)[0]
crystal_a = Structure.from_file(random_selection_1)
crystal_b = Structure.from_file(random_selection_2)
nn_strategy = JmolNN()
sgraph_a = StructureGraph.with_local_env_strategy(crystal_a, nn_strategy)
sgraph_b = StructureGraph.with_local_env_strategy(crystal_b, nn_strategy)
return sgraph_a.diff(sgraph_b, strict=False)["dist"]
except Exception:
return np.nan
@staticmethod
@jit
def euclidean_distance(u: np.ndarray, v: np.ndarray) -> float:
"""
Args:
            u: first vector (1D numpy array)
            v: second vector (1D numpy array)
        Returns:
            Euclidean (L2) distance between u and v
"""
return np.linalg.norm(u - v)
@staticmethod
def _randomized_graphs(
structure_list_a: list,
structure_list_b: list,
iterations: int = 5000,
njobs: int = 2,
) -> list:
"""
Randomly sample structures from the structure list and compare their Jaccard graph distance.
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
iterations (int): Number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
njobs (int): the maximum number of workers
Returns:
list of length iterations of the Jaccard distances
"""
diffs = []
get_one_graph_comparison_partial = partial(
Statistics._get_one_graph_comparison, structure_list_a, structure_list_b
)
with concurrent.futures.ProcessPoolExecutor(max_workers=njobs) as executor:
logger.debug("iterating for graph comparisons")
for diff in tqdm(
executor.map(get_one_graph_comparison_partial, range(iterations)),
total=len(range(iterations)),
):
diffs.append(diff)
return diffs
@staticmethod
def _get_one_randomized_structure_property(
structure_list_a: list, structure_list_b: list, feature: str, _
) -> float:
"""
Returns difference between the selected property for two random structures.
Args:
structure_list_a (list): list of paths (str) to structures
structure_list_b (list): list of paths (str) to structures
feature (str): feature that shall be compared, available are 'density', 'num_sites'
                and 'volume'
_:
Returns:
difference of feature for two randomly selected structures
"""
try:
random_selection_1 = random.sample(structure_list_a, 1)[0]
random_selection_2 = random.sample(structure_list_b, 1)[0]
crystal_a = Structure.from_file(random_selection_1)
crystal_b = Structure.from_file(random_selection_2)
if feature == "density":
diff = np.abs(crystal_a.density - crystal_b.density)
elif feature == "num_sites":
diff = np.abs(crystal_a.num_sites - crystal_b.num_sites)
elif feature == "volume":
diff = np.abs(crystal_a.volume - crystal_b.volume)
return diff
except Exception:
return np.nan
@staticmethod
def _randomized_structure_property(
structure_list_a: list,
structure_list_b: list,
feature: str = "density",
iterations: int = 5000,
njobs: int = 2,
) -> list:
"""
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
feature (str): property that is used for the structure comparisons, available options are
density, num_sites, volume. Default is density.
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
njobs (int): the maximum number of concurrent workers
Returns:
list with rmsds
"""
diffs = []
get_one_randomized_structure_property_partial = partial(
Statistics._get_one_randomized_structure_property,
structure_list_a,
structure_list_b,
feature,
)
with concurrent.futures.ProcessPoolExecutor(max_workers=njobs) as executor:
logger.debug("iterating for graph comparisons")
for diff in tqdm(
executor.map(
get_one_randomized_structure_property_partial, range(iterations)
),
total=len(range(iterations)),
):
diffs.append(diff)
return diffs
@staticmethod
def _get_one_rmsd(structure_list_a: list, structure_list_b: list, _) -> float:
logger.debug("i am in the _get_one_rmsd routine")
try:
random_selection_1 = random.sample(structure_list_a, 1)[0]
random_selection_2 = random.sample(structure_list_b, 1)[0]
a = get_rmsd(random_selection_1, random_selection_2)
return a
except Exception as e:
logger.error("Exception %s occured", e)
return np.nan
@staticmethod
def _randomized_rmsd(
structure_list_a: list,
structure_list_b: list,
iterations: float = 5000,
njobs: int = 2,
) -> list:
"""
Args:
structure_list_a (list): list of paths to structures
structure_list_b (list): list of paths to structures
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
njobs (int): the maximum number of concurrent workers
        Returns:
            list of length iterations with the Kabsch RMSD values
"""
rmsds = []
with concurrent.futures.ProcessPoolExecutor(max_workers=njobs) as executor:
logger.debug("iterating for rmsd comparisons")
get_one_rmsd_partial = partial(
Statistics._get_one_rmsd, structure_list_a, structure_list_b
)
for rmsd in tqdm(
executor.map(get_one_rmsd_partial, range(iterations)),
total=len(range(iterations)),
):
rmsds.append(rmsd)
return rmsds
@staticmethod
def optimal_knn(data, max_cluster: int = 20):
"""
        Use silhouette scores to find the optimal number of clusters.
        We use silhouette scores as they are easier to use in an algorithm
        than the "elbow criterion".
Args:
data (np.array): data matrix
max_cluster (int): maximum number of clusters. Optimization will happen
for all cluster numbers k in (2, min(len(data), max_cluster))
        Returns:
            the fitted KMeans estimator and the optimal number of clusters
"""
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
logger.debug("searching for optimal knn clustering")
silhouette_scores = []
n_clusters = []
# avoid that k > len(data)
upper_boundary = np.min([len(data), max_cluster])
sc = StandardScaler()
data = sc.fit_transform(data)
for n_cluster in range(2, upper_boundary):
kmeans = KMeans(n_clusters=n_cluster).fit(data)
label = kmeans.labels_
sil_coeff = silhouette_score(data, label, metric="euclidean")
silhouette_scores.append(sil_coeff)
n_clusters.append(n_cluster)
optimal_n_cluster = n_clusters[np.argmax(silhouette_scores)]
kmeans = KMeans(n_clusters=optimal_n_cluster).fit(data)
logger.info("found optimal knn clustering with %s clusters", optimal_n_cluster)
return kmeans, optimal_n_cluster
@staticmethod
def trimean(data):
"""
Args:
data: numeric data
Returns:
trimean (float) for data
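            Example (illustrative): for data = [1, 2, 3, 4, 5], q1 = 2, median = 3 and q3 = 4,
            so the trimean is (2 + 2*3 + 4) / 4 = 3.0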
"""
q1 = np.quantile(data, 0.25)
q3 = np.quantile(data, 0.75)
return (q1 + 2 * np.median(data) + q3) / 4
@staticmethod
def interquartile_mean(data):
"""
Args:
data: numeric data
Returns:
interquartile mean (float) for data
"""
q1 = np.quantile(data, 0.25)
q3 = np.quantile(data, 0.75)
sorted_data = np.sort(data)
trimmed_data = sorted_data[(sorted_data >= q1) & (sorted_data <= q3)]
return np.mean(trimmed_data)
@staticmethod
def midhinge(data):
"""
Args:
data: numeric data
Returns:
midhinge mean (float) for data
"""
q1 = np.quantile(data, 0.25)
q3 = np.quantile(data, 0.75)
return np.mean([q1, q3])
@staticmethod
def val_range(data):
"""
Args:
data: numeric data
Returns:
value range (float)
"""
max_val = np.max(data)
min_val = np.min(data)
return abs(max_val - min_val)
@staticmethod
def mid_range(data):
"""
Args:
data: numeric data
Returns:
midpoint of value range as measure of centrality
"""
return (np.max(data) + np.min(data)) / 2
class DistStatistic(Statistics):
def __init__(self, structure_list=None, property_list=None, njobs: int = 1):
self.structure_list = structure_list
self.property_list = property_list
self.feature_names = None
self.properties_statistics = {}
self.njobs = njobs
if property_list is not None:
if isinstance(property_list, pd.DataFrame):
logger.debug("Input seems to be a dataframe")
self.list_of_list_mode = True
self.feature_names = self.property_list.columns.values
logger.debug("will use %s as feature names", self.feature_names)
_tmp_property_list = []
self.property_list = self.property_list._get_numeric_data()
for feature in self.feature_names:
_tmp_property_list.append(
self.property_list[feature].values.astype(np.float32).tolist()
)
self.property_list = _tmp_property_list
else:
if all(isinstance(i, list) for i in property_list):
self.list_of_list_mode = True
                    self.feature_names = [
                        "_".join(["feature", str(i)]) for i in range(len(self.property_list))
                    ]
else:
self.list_of_list_mode = False
def __repr__(self):
return "DistStatistic"
@classmethod
def from_folder(cls, folder: str, extension: str = "cif", njobs: int = 2):
"""
Args:
folder (str): name of the folder which is used to create the structure list
extension (str): extension of the structure files
njobs (int): the maximum number of concurrent workers
        Returns:
            a DistStatistic object built from the structures found in the folder
"""
sl = get_structure_list(folder, extension)
return cls(sl, njobs=njobs)
def randomized_graphs(self, iterations: int = 5000) -> list:
"""
Returns iterations times the Jaccard distance between structure graph of two randomly chosen structures
Args:
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
Returns:
list of jaccard distances
"""
jaccards = self._randomized_graphs(
self.structure_list, self.structure_list, iterations
)
return jaccards
def randomized_structure_property(
self, feature: str = "density", iterations: int = 5000
) -> list:
"""
Returns iterations times the Euclidean distance between two randomly chosen structures
Args:
feature (str): property that is used for the structure comparisons, available options are
density, num_sites, volume. Default is density.
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
Returns:
list of property distances
"""
distances = self._randomized_structure_property(
self.structure_list, self.structure_list, feature, iterations, self.njobs
)
return distances
def randomized_rmsd(self, iterations: int = 5000) -> list:
"""
Computes the Kabsch RMSD between two randomly chosen structures; this is repeated iterations times.
Args:
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
Returns:
list of Kabsch RMSDs
"""
distances = self._randomized_rmsd(
self.structure_list, self.structure_list, iterations, self.njobs
)
return distances
@staticmethod
def _properties_test_statistics(property_list):
"""
Returns various descriptive statistics for an array.
Inspired by what <NAME> (https://github.com/EricSchles) presented in the NYC data science meetup
Args:
property_list (list): list of numeric values for which descriptive statistics will be computed
Returns:
result_dict (dict): dictionary with descriptive statistics
"""
property_list = np.array(property_list)
q1 = np.percentile(property_list, 25)
q3 = np.percentile(property_list, 75)
agostino = normaltest(property_list)
result_dict = {
"trimean": Statistics.trimean(property_list),
"midhinge": Statistics.midhinge(property_list),
"interquartile_mean": Statistics.interquartile_mean(property_list),
"value_range": Statistics.val_range(property_list),
"mid_range": Statistics.mid_range(property_list),
"minimum": np.min(property_list),
"maximum": np.max(property_list),
"median": np.median(property_list),
"stdev": np.std(property_list),
"geometric_mean": gmean(property_list),
"mean": np.mean(property_list),
"interquartile_range": q3 - q1,
"quartile_1": q1,
"quartile_3": q3,
"MAD": median_absolute_deviation(property_list),
"kurtosis": kurtosis(property_list),
"mode": mode(property_list),
"variation": variation(property_list),
"skew": skew(property_list),
"D_Agostino_statistic": agostino[0],
"D_Agostino_p_value": agostino[1],
}
return result_dict
def properties_test_statistics(self):
"""
Returns:
dictionary with descriptive statistics for each feature column.
"""
if self.list_of_list_mode:
# concurrently loop over the different feature columns.
with concurrent.futures.ProcessPoolExecutor(
max_workers=self.njobs
) as executor:
logger.debug("looping over feature columns for properties statistics")
out_dict = {}
for i, results_dict in enumerate(
executor.map(
DistStatistic._properties_test_statistics, self.property_list
)
):
logger.debug("Creating statistics for %s", self.feature_names[i])
self.properties_statistics[self.feature_names[i]] = results_dict
out_dict[self.feature_names[i]] = results_dict
return out_dict
else:
out_dict = {}
results_dict = DistStatistic._properties_test_statistics(self.property_list)
self.properties_statistics[self.feature_names] = results_dict
out_dict[self.feature_names] = results_dict
return out_dict
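# Usage sketch with made-up data: DistStatistic accepts a DataFrame of numeric
# properties (column names and values below are purely illustrative), and
# properties_test_statistics() then returns one dictionary of descriptive
# statistics per feature column.
def _example_dist_statistic():
    df = pd.DataFrame({
        "density": [0.92, 1.10, 1.31, 1.05, 0.98, 1.22, 1.15, 1.01],
        "num_sites": [24, 48, 36, 60, 24, 48, 36, 72],
    })
    stat = DistStatistic(property_list=df, njobs=1)
    summary = stat.properties_test_statistics()
    # e.g. summary["density"]["median"] or summary["num_sites"]["interquartile_range"]
    return summary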
class DistComparison(Statistics):
"""
Comparator to compare the difference or similarity between two distributions.
The idea is here to save the test statistics to the object such that we can
then implement some dunder methods to compare different Comparator objects and
e.g. find out which distributions are most similar to each other.
"""
def __init__(
self,
structure_list_1: list = None,
structure_list_2: list = None,
property_list_1: [list, pd.DataFrame] = None,
property_list_2: [list, pd.DataFrame] = None,
njobs: int = 2,
):
"""
Args:
structure_list_1 (list):
structure_list_2 (list):
property_list_1 (list or pd.DataFrame):
property_list_2 (list or pd.DataFrame):
"""
self.structure_list_1 = structure_list_1
self.structure_list_2 = structure_list_2
self.property_list_1 = property_list_1
self.property_list_2 = property_list_2
self.feature_names = []
self.qq_statistics = {}
self.properties_statistics = {}
self.rmsds = None
self.jaccards = None
self.random_structure_property = {}
self.njobs = njobs
if (property_list_1 is not None) and (property_list_2 is not None):
if not isinstance(self.property_list_1, type(self.property_list_2)):
raise ValueError("The two property inputs must be of same type")
# Check if input is a dataframe. If this is the case, extract the column names
# and convert it to list of lists
if isinstance(property_list_1, pd.DataFrame):
logger.debug(
"Input seems to be a dataframe, will only use numeric data"
)
self.list_of_list_mode = True
self.property_list_1 = self.property_list_1._get_numeric_data()
self.feature_names = self.property_list_1.columns.values
logger.debug("will use %s as feature names", self.feature_names)
_tmp_property_list_1 = []
for feature in self.feature_names:
_tmp_property_list_1.append(
self.property_list_1[feature].values.astype(np.float32).tolist()
)
self.property_list_1 = _tmp_property_list_1
_tmp_property_list_2 = []
self.property_list_2 = self.property_list_2._get_numeric_data()
for feature in self.feature_names:
_tmp_property_list_2.append(
self.property_list_2[feature].values.astype(np.float32).tolist()
)
self.property_list_2 = _tmp_property_list_2
assert len(self.property_list_1) == len(self.feature_names)
assert len(self.property_list_2) == len(self.feature_names)
else:
# Check if the input is a list of list (i.e. multiple feature columns)
# if this is the case, we have to iterate over the lists to compute the test statistics
if all(isinstance(i, list) for i in property_list_1):
if all(isinstance(i, list) for i in property_list_2):
self.list_of_list_mode = True
self.feature_names = [
"_".join(["feature", i])
for i in range(len(self.property_list_1))
]
else:
logger.error(
"One input seems to be a list of list whereas the other one is not. "
"The property lists must be both of the same type. Please check your inputs."
)
else:
if all(isinstance(i, list) for i in property_list_2):
logger.error(
"One input seems to be a list of list whereas the other one is not. "
"The property lists must be both of the same type. Please check your inputs."
)
else:
self.feature_names.append("feature_0")
self.list_of_list_mode = False
def __repr__(self):
return "DistComparison"
def __len__(self):
return (
len(self.structure_list_1)
+ len(self.structure_list_2)
+ len(self.property_list_1)
+ len(self.property_list_2)
)
@classmethod
def from_folders(
cls,
folder_1: str,
folder_2: str,
property_list_1: [list, pd.DataFrame] = None,
property_list_2: [list, pd.DataFrame] = None,
extension="cif",
njobs: int = 2,
):
"""Constructor method for a DistComparison object"""
sl_1 = get_structure_list(folder_1, extension)
sl_2 = get_structure_list(folder_2, extension)
return cls(sl_1, sl_2, property_list_1, property_list_2, njobs=njobs)
def randomized_graphs(self, iterations: int = 5000) -> list:
"""
Computes the Jaccard distance between the structure graphs of two randomly chosen structures; this is repeated iterations times.
Args:
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
Returns:
list of jaccard distances
"""
jaccards = self._randomized_graphs(
self.structure_list_1, self.structure_list_2, iterations, self.njobs
)
self.jaccards = jaccards
return jaccards
def randomized_structure_property(
self, feature: str = "density", iterations: int = 5000
) -> list:
"""
Computes the Euclidean distance between the selected property of two randomly chosen structures; this is repeated iterations times.
Args:
feature (str): property that is used for the structure comparisons, available options are
density, num_sites, volume. Default is density.
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
Returns:
list of property distances
"""
distances = self._randomized_structure_property(
self.structure_list_1,
self.structure_list_2,
feature,
iterations,
self.njobs,
)
self.random_structure_property[feature] = distances
return distances
def randomized_rmsd(self, iterations: int = 5000) -> list:
"""
Computes the Kabsch RMSD between two randomly chosen structures; this is repeated iterations times.
Args:
iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might
be sampled several times).
Returns:
list of Kabsch RMSDs
"""
distances = self._randomized_rmsd(
self.structure_list_1, self.structure_list_2, iterations, self.njobs
)
self.rmsds = distances
return distances
def cluster_comparison(self, n_runs: int = 4):
"""
Performs distance measurements based on (optimal) knn clustering. The following metrics
are computed:
- Optimal clustering on both property distributions based on a Shilouethette criterion
- Clustering on the other distribution with the same number of clusters, distance between
the centroids
- max min (d(x_i, c_j)) and max min (d(x_i, c_i)), the maximum minimal distance between the
data points and the cluster centroids
- mean min (d(x_i, c_j)) and mean min (d(x_i, c_i)), the mean minimal distance between the
data points and the cluster centroids
- median min (d(x_i, c_j)) and median min (d(x_i, c_i)), the median minimal distance between the
data points and the cluster centroids
- trimean min (d(x_i, c_j)) and trimean min (d(x_i, c_i)), the trimean minimal distance between the
data points and the cluster centroids
- min min (d(x_i, c_j)) and min min (d(x_i, c_i)), the minimum minimal distance between the
data points and the cluster centroids
Ideally, one would want the outer metrics to be similar to the inner metrics.
To avoid influences from from different scales/units, the data is standardized by default.
As the outcome of the knn clusterings is randomized, especically if the clustering is bad, we
bootstrap the metrics by default.
Returns:
dict with the metrics (floats)
"""
from sklearn.preprocessing import StandardScaler
from scipy.spatial import KDTree
n_cluster_1s = []
n_cluster_2s = []
k_d_1_1s_min = []
k_d_1_2s_min = []
k_d_2_2s_min = []
k_d_2_1s_min = []
k_d_1_1s_max = []
k_d_1_2s_max = []
k_d_2_2s_max = []
k_d_2_1s_max = []
k_d_1_1s_mean = []
k_d_1_2s_mean = []
k_d_2_2s_mean = []
k_d_2_1s_mean = []
k_d_1_1s_median = []
k_d_1_2s_median = []
k_d_2_2s_median = []
k_d_2_1s_median = []
k_d_1_1s_trimean = []
k_d_1_2s_trimean = []
k_d_2_2s_trimean = []
k_d_2_1s_trimean = []
distance_clustering_1s = []
distance_clustering_2s = []
for i in tqdm(range(n_runs)):
knn_1, n_cluster_1 = self.optimal_knn(
np.transpose(np.array(self.property_list_1))
)
n_cluster_1s.append(n_cluster_1)
sc = StandardScaler()
tree_1 = KDTree(knn_1.cluster_centers_)
k_d_1_1, _ = tree_1.query(
sc.fit_transform(np.transpose(np.array(self.property_list_1))), k=1
)
k_d_1_2, _ = tree_1.query(
sc.fit_transform(np.transpose(np.array(self.property_list_2))), k=1
)
k_d_1_1s_min.append(np.min(k_d_1_1))
k_d_1_2s_min.append(np.min(k_d_1_2))
k_d_1_1s_max.append(np.max(k_d_1_1))
k_d_1_2s_max.append(np.max(k_d_1_2))
k_d_1_1s_mean.append(np.mean(k_d_1_1))
k_d_1_2s_mean.append(np.mean(k_d_1_2))
k_d_1_1s_median.append(np.median(k_d_1_1))
k_d_1_2s_median.append(np.median(k_d_1_2))
k_d_1_1s_trimean.append(self.trimean(k_d_1_1))
k_d_1_2s_trimean.append(self.trimean(k_d_1_2))
kmeans_1 = KMeans(n_clusters=n_cluster_1).fit(
sc.fit_transform(np.transpose(np.array(self.property_list_2)))
)
distance_clustering_1 = self.euclidean_distance(
knn_1.cluster_centers_, kmeans_1.cluster_centers_
)
distance_clustering_1s.append(distance_clustering_1)
knn_2, n_cluster_2 = self.optimal_knn(
np.transpose(np.array(self.property_list_2))
)
n_cluster_2s.append(n_cluster_2)
tree_2 = KDTree(knn_2.cluster_centers_)
k_d_2_2, _ = tree_2.query(
sc.fit_transform(np.transpose(np.array(self.property_list_2))), k=1
)
k_d_2_1, _ = tree_2.query(
sc.fit_transform(np.transpose(np.array(self.property_list_1))), k=1
)
k_d_2_2s_min.append(np.min(k_d_2_2))
k_d_2_1s_min.append(np.min(k_d_2_1))
k_d_2_2s_max.append(np.max(k_d_2_2))
k_d_2_1s_max.append(np.max(k_d_2_1))
k_d_2_2s_mean.append(np.mean(k_d_2_2))
k_d_2_1s_mean.append(np.mean(k_d_2_1))
k_d_2_2s_median.append(np.median(k_d_2_2))
k_d_2_1s_median.append(np.median(k_d_2_1))
k_d_2_2s_trimean.append(self.trimean(k_d_2_2))
k_d_2_1s_trimean.append(self.trimean(k_d_2_1))
kmeans_2 = KMeans(n_clusters=n_cluster_2).fit(
sc.fit_transform(np.transpose(np.array(self.property_list_1)))
)
distance_clustering_2 = self.euclidean_distance(
knn_2.cluster_centers_, kmeans_2.cluster_centers_
)
distance_clustering_2s.append(distance_clustering_2)
result_dict = {
"mean_n_cluster_1": np.mean(n_cluster_1s),
"mean_n_cluster_2": np.mean(n_cluster_2s),
"mean_euclidean_1": np.mean(distance_clustering_1s),
"mean_euclidean_2": np.mean(distance_clustering_2s),
"mean_max_min_inner_1": np.mean(k_d_1_1),
"mean_max_min_outer_1": np.mean(k_d_1_2),
"mean_max_min_inner_2": np.mean(k_d_2_2),
"mean_max_min_outer_2": np.mean(k_d_2_1),
"mean_mean_min_inner_1": np.mean(k_d_1_1),
"mean_mean_min_outer_1": np.mean(k_d_1_2),
"mean_mean_min_inner_2": np.mean(k_d_2_2),
"mean_mean_min_outer_2": np.mean(k_d_2_1),
"mean_median_min_inner_1": np.mean(k_d_1_1),
"mean_median_min_outer_1": np.mean(k_d_1_2),
"mean_median_min_inner_2": np.mean(k_d_2_2),
"mean_median_min_outer_2": np.mean(k_d_2_1),
"mean_trimean_min_inner_1": np.mean(k_d_1_1),
"mean_trimean_min_outer_1": np.mean(k_d_1_2),
"mean_trimean_min_inner_2": np.mean(k_d_2_2),
"mean_trimean_min_outer_2": np.mean(k_d_2_1),
"mean_min_min_inner_1": np.mean(k_d_1_1),
"mean_min_min_outer_1": np.mean(k_d_1_2),
"mean_min_min_inner_2": np.mean(k_d_2_2),
"mean_min_min_outer_2": np.mean(k_d_2_1),
"std_n_cluster_1": np.std(n_cluster_1s),
"std_n_cluster_2": np.std(n_cluster_2s),
"std_euclidean_1": np.std(distance_clustering_1s),
"std_euclidean_2": | np.std(distance_clustering_2s) | numpy.std |
import numpy as np
from math import exp
import copy
def net(W, E):
return np.matmul(W, E)
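# Quick sanity check with made-up values: for a 2x3 weight matrix and a 3-element
# input vector, net() returns the two weighted sums.
_W = np.array([[1.0, 0.0, 1.0],
               [0.0, 1.0, 1.0]])
_E = np.array([1.0, 2.0, 3.0])
assert np.allclose(net(_W, _E), [4.0, 5.0])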
import math
import numpy as np
import tensorflow as tf
import utils as ut
from matplotlib.patches import Ellipse
BATCHSIZE = 10000
K = 30
MAX_ITER = 800
LR = 0.1
COLOR_LIST = ['r', 'g', 'b', 'y', 'm', 'k']
IS_VALID = False
# data = np.load('data/data2D.npy')
data = np.load('data/data100D.npy')
print(data.shape)
DIM = data.shape[1]
NUM_PTS = data.shape[0]
NUM_VALID = int(math.floor(NUM_PTS / 3.0))
if IS_VALID:
npr = np.random.RandomState(1234)
data_idx = npr.permutation(NUM_PTS)
val_data = data[data_idx[:NUM_VALID]]
data = data[data_idx[NUM_VALID:]]
def log_pdf_mix_gaussian(X, mu, sigma, log_pi):
""" log pdf of mixture gaussian with covariance sigma * I
Args:
X: B X D
mu: K X D
sigma: K X 1
log_pi: K X 1
Returns:
log likelihood
"""
Pi = tf.constant(float(np.pi))
sigma_2 = tf.transpose(tf.square(sigma)) # K X 1
diff = ut.pdist(X, mu) # B X K
log_likelihood = diff / sigma_2 # B X K
log_likelihood += DIM * tf.log(2 * Pi)
log_likelihood += DIM * tf.log(sigma_2)
log_likelihood *= -0.5
log_likelihood += tf.transpose(log_pi)
log_joint_prob = log_likelihood
log_likelihood = ut.reduce_logsumexp(log_likelihood, keep_dims=True) # B x 1
log_posterior = log_joint_prob - log_likelihood
return tf.reduce_sum(log_likelihood), log_posterior
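# NumPy reference for the function above (a sketch for sanity-checking the
# TensorFlow graph on small inputs). It assumes, as the code above implicitly does,
# that ut.pdist returns the *squared* Euclidean distances between rows of X and mu;
# scipy is used only for a stable log-sum-exp.
def log_pdf_mix_gaussian_np(X, mu, sigma, log_pi):
    from scipy.special import logsumexp
    d = X.shape[1]
    sigma_2 = np.square(sigma).T                               # 1 x K
    sq_dist = ((X[:, None, :] - mu[None, :, :]) ** 2).sum(-1)  # B x K squared distances
    log_joint = -0.5 * (sq_dist / sigma_2
                        + d * np.log(2 * np.pi)
                        + d * np.log(sigma_2)) + log_pi.T      # B x K
    log_lik = logsumexp(log_joint, axis=1, keepdims=True)      # B x 1
    return log_lik.sum(), log_joint - log_lik                  # scalar, B x K log posterior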
graph = tf.Graph()
with graph.as_default():
inputPL = tf.placeholder(tf.float32, shape=(None, DIM))
## Initialization
mu = tf.Variable(tf.truncated_normal([K, DIM]))
sigma = tf.Variable(tf.truncated_normal([K, 1]))
pi = tf.Variable(tf.random_uniform([K]))
pi_normalize = tf.nn.softmax(pi)
log_pi = ut.logsoftmax(pi)
## compute the log prob
log_pdf, log_posterior = log_pdf_mix_gaussian(inputPL, mu, sigma, log_pi)
optimizer = tf.train.AdamOptimizer(
LR, beta1=0.9, beta2=0.99, epsilon=1e-5).minimize(-log_pdf)
train_loss = []
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print('Initialized')
for i in range(MAX_ITER):
_, pdf_val, mu_np, sigma_np, pi_np, log_posterior_np = session.run(
[optimizer, log_pdf, mu, sigma, pi_normalize, log_posterior],
feed_dict={inputPL: data})
train_loss += [-pdf_val]
print('Iter {:07d}: log likelihood = {}'.format(i + 1, pdf_val))
if ((i + 1) % 50) == 0 or i == 0:
import pylab as plt
fig = plt.figure()
ax = fig.add_subplot(111)
if not IS_VALID:
# plt.scatter(data[:, 0], data[:, 1], c='c')
# plt.scatter(
# mu_np[:, 0], mu_np[:, 1], marker='s', c=COLOR_LIST[:K], s=10)
# # draw 1 std contour
# ells = [
# Ellipse(
# xy=mu_np[ii, :2],
# width=2 * np.sqrt(sigma_np[ii]**2),
# height=2 * np.sqrt(sigma_np[ii]**2)) for ii in xrange(K)
# ]
# for ii, ee in enumerate(ells):
# ax.add_artist(ee)
# ee.set_clip_box(ax.bbox)
# ee.set_alpha(0.5)
# ee.set_facecolor(COLOR_LIST[ii % len(COLOR_LIST)])
for ii in range(K):
idx = np.argmax(log_posterior_np, axis=1) == ii
plt.scatter(data[idx, 0], data[idx, 1], c=COLOR_LIST[ii % len(COLOR_LIST)])
plt.scatter(mu_np[ii, 0], mu_np[ii, 1], marker='s', c='c', s=80)
else:
loss_np = session.run([log_pdf], feed_dict={inputPL: val_data})
for ii in range(K):
idx = np.argmax(log_posterior_np, axis=1) == ii
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
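# Illustrative sketch (not part of the generated definitions below): build P -1 by
# hand from the identity and the inversion and expand one reflection. Because both
# translations are zero, (1, 2, 3) maps to itself and to (-1, -2, -3), each with a
# unit phase factor.
def _example_symmetry_equivalents():
    identity = N.identity(3, int)
    ops = [(identity, N.array([0, 0, 0]), N.array([1, 1, 1])),
           (-identity, N.array([0, 0, 0]), N.array([1, 1, 1]))]
    p1bar = SpaceGroup(2, 'P -1', ops)
    hkls, phases = p1bar.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
    return hkls, phases  # [[1, 2, 3], [-1, -2, -3]] and [1+0j, 1+0j]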
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
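# Sketch: every group is registered under both its number and its Hermann-Mauguin
# symbol, so the two lookups below return the same object, and len() counts its
# symmetry operations (four for P 21 21 21).
assert space_groups[19] is space_groups['P 21 21 21']
assert len(space_groups[19]) == 4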
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
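# Space group 40: A m a 2 (orthorhombic, A-centred)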
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
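# Space group 41: A b a 2 (orthorhombic, A-centred)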
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
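# Space group 42: F m m 2 (orthorhombic, face-centred)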
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
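# Space group 43: F d d 2 (orthorhombic, face-centred)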
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
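# Space group 44: I m m 2 (orthorhombic, body-centred)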
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
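# Space group 45: I b a 2 (orthorhombic, body-centred)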
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
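# Space group 46: I m a 2 (orthorhombic, body-centred)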
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
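# Space group 47: P m m m (orthorhombic, primitive)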
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
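# Space group 48: P n n n :2 (orthorhombic, primitive, origin choice 2)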
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
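# Space group 49: P c c m (orthorhombic, primitive)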
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
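# Space group 50: P b a n :2 (orthorhombic, primitive, origin choice 2)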
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
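# Space group 51: P m m a (orthorhombic, primitive)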
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
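# Space group 52: P n n a (orthorhombic, primitive)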
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
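# Space group 53: P m n a (orthorhombic, primitive)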
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
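# Space group 54: P c c a (orthorhombic, primitive)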
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
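# Space group 55: P b a m (orthorhombic, primitive)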
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
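# Space group 56: P c c n (orthorhombic, primitive)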
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
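# Space group 57: P b c m (orthorhombic, primitive)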
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
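# Space group 58: P n n m (orthorhombic, primitive)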
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
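# Space group 59: P m m n :2 (orthorhombic, primitive, origin choice 2)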
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
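# Space group 60: P b c n (orthorhombic, primitive)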
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
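# Space group 61: P b c a (orthorhombic, primitive)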
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
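# Space group 62: P n m a (orthorhombic, primitive)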
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
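# Space group 63: C m c m (orthorhombic, C-centred)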
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
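# Space group 64: C m c a (orthorhombic, C-centred)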
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
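# Space group 65: C m m m (orthorhombic, C-centred)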
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
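# Space group 66: C c c m (orthorhombic, C-centred)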
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
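# Space group 67: C m m a (orthorhombic, C-centred)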
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
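# Space group 68: C c c a :2 (orthorhombic, C-centred, origin choice 2)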
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
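# Space group 69: F m m m (orthorhombic, face-centred)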
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
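# Example lookup: space_groups[69] and space_groups['F m m m'] both return the
# SpaceGroup instance registered just above.
# Space group 70: 'F d d d :2' (origin choice 2). Each block below stores the
# symmetry operations as (rotation matrix, translation numerator, translation
# denominator) triples; the fractional translation is trans_num/trans_den,
# taken componentwise, and the finished SpaceGroup is registered under both
# its International Tables number and its Hermann-Mauguin symbol.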
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
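# Space group 71: 'I m m m'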
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
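# Space group 72: 'I b a m'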
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
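# Space group 73: 'I b c a'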
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
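# Space group 74: 'I m m a'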
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
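# Space group 75: 'P 4'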
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
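# Space group 76: 'P 41'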
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
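# Space group 77: 'P 42'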
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
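# Space group 78: 'P 43'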
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
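# Space group 79: 'I 4'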
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
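# Space group 80: 'I 41'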
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
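# Space group 81: 'P -4'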
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
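# Space group 82: 'I -4'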
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
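# Space group 83: 'P 4/m'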
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
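# Space group 84: 'P 42/m'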
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
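# Space group 85: 'P 4/n :2' (origin choice 2)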
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
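# Space group 86: 'P 42/n :2' (origin choice 2)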
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
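# Space group 87: 'I 4/m'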
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
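# Space group 88: 'I 41/a :2' (origin choice 2)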
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
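# Space group 89: 'P 4 2 2'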
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
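# Space group 90: 'P 4 21 2'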
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
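# Space group 91: 'P 41 2 2'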
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
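# Space group 92: 'P 41 21 2'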
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
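# Space group 93: 'P 42 2 2'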
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
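# Space group 94: 'P 42 21 2'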
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
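# Space group 95: 'P 43 2 2'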
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
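# Space group 96: 'P 43 21 2'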
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
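# Space group 97: 'I 4 2 2'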
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
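# Space group 98: 'I 41 2 2'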
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
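# Space group 99: 'P 4 m m'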
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
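# Space group 100: 'P 4 b m'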
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
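# Space group 101 (P 42 c m)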
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
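# Space group 102 (P 42 n m)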
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
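# Space group 103 (P 4 c c)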
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
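# Space group 104 (P 4 n c)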
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
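# Space group 105 (P 42 m c)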
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
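# Space group 106 (P 42 b c)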
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
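# Space group 107 (I 4 m m)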
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
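# Space group 108 (I 4 c m)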
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
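# Space group 109 (I 41 m d)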
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
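# Space group 110 (I 41 c d)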
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
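# Space group 111 (P -4 2 m)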
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
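# Space group 112 (P -4 2 c)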
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
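# Space group 113 (P -4 21 m)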
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
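# Space group 114 (P -4 21 c)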
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
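# Space group 115 (P -4 m 2)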
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
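# Space group 116 (P -4 c 2)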
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
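# Space group 117 (P -4 b 2)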
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
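# Space group 118 (P -4 n 2)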
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
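# Space group 119 (I -4 m 2)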
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
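# Space group 120 (I -4 c 2)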
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
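# Space group 121 (I -4 2 m)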
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
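# Space group 122 (I -4 2 d)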
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
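# Space group 123 (P 4/m m m)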
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
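# Space group 124 (P 4/m c c)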
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
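# Space group 125 (P 4/n b m :2)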
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
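# Space group 126 (P 4/n n c :2)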
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
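# Space group 127 (P 4/m b m)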
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
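# Space group 128 (P 4/m n c)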
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
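# Space group 129: P 4/n m m :2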
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
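# Space group 130: P 4/n c c :2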
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
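# Space group 131: P 42/m m c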
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
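# Space group 132: P 42/m c m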
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
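# Space group 133: P 42/n b c :2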
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
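# Space group 134: P 42/n n m :2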
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
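# Space group 135: P 42/m b c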
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
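# Space group 136: P 42/m n m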
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
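# Space group 137: P 42/n m c :2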
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
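# Space group 138: P 42/n c m :2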
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
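# Space group 139: I 4/m m m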
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
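# Space group 140: I 4/m c m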
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
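# Space group 141: I 41/a m d :2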
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
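# Space group 142: I 41/a c d :2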
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
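# Space group 143: P 3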
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
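# Space group 144: P 31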
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
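# Space group 145: P 32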
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
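# Space group 146: R 3 :H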
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
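# Space group 147: P -3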
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
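# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the generated table): it shows how a
# (rot, trans_num, trans_den) tuple stored above is conventionally applied to
# a fractional coordinate, i.e. x' = rot . x + trans_num/trans_den, reduced
# mod 1.  The helper name _apply_symop_example is hypothetical; the SpaceGroup
# class used by this module may expose its own, different interface.
def _apply_symop_example(symop, xyz):
    """Apply one symmetry operation to the fractional coordinates xyz."""
    rot, trans_num, trans_den = symop
    result = []
    for i in range(3):
        value = 0.0
        for j in range(3):
            value = value + rot[i][j] * xyz[j]
        # trans_num and trans_den are integer arrays; divide as floats so the
        # translation 1/3, 1/2, 5/6, ... is not truncated to zero.
        value = value + float(trans_num[i]) / float(trans_den[i])
        result.append(value % 1.0)
    return result
# For example, _apply_symop_example(transformations[1], (0.1, 0.2, 0.3))
# would map (0.1, 0.2, 0.3) by the second operation listed for R -3 c :H
# above, namely (-y, x-y, z).
# ---------------------------------------------------------------------------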
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
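# Note (illustrative): every group in this table is registered twice, once
# under its International Tables number and once under its Hermann-Mauguin
# symbol, so e.g. space_groups[176] is space_groups['P 63/m'] evaluates to
# True.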
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
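# Space group 178 ('P 61 2 2')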
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
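# Space group 179 ('P 65 2 2')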
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
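# Space group 180 ('P 62 2 2')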
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
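# Space group 181 ('P 64 2 2')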
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
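# Space group 182 ('P 63 2 2')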
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
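# Space group 183 ('P 6 m m')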
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
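# Space group 184 ('P 6 c c')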
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
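# Space group 185 ('P 63 c m')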
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
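# Space group 186 ('P 63 m c')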
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
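# Space group 187 ('P -6 m 2')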
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
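# Space group 188 ('P -6 c 2')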
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
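# Space group 189 ('P -6 2 m')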
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
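# Space group 190 ('P -6 2 c')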
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
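# Space group 191 ('P 6/m m m')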
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
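# Space group 192 ('P 6/m c c')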
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
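# Space group 193 ('P 63/m c m')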
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
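# Space group 194 ('P 63/m m c')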
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
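# Space group 195 ('P 2 3')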
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
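# Space group 196 ('F 2 3'); the operations are repeated with the
# F-centring translations (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).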
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
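# Space group 197 ('I 2 3'); the operations are repeated with the
# body-centring translation (1/2,1/2,1/2).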
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
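# Space group 198 (P 21 3): 12 symmetry operations.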
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
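# Space group 199 (I 21 3): 24 symmetry operations (12 plus their
# body-centred images).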
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
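# Space group 200 (P m -3): 24 symmetry operations.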
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
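# Space group 201 (P n -3 :2): 24 symmetry operations (origin choice 2).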
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
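# Space group 202 (F m -3): 96 symmetry operations (24 repeated for each of the
# face-centring translations (0,0,0), (0,1/2,1/2), (1/2,0,1/2), (1/2,1/2,0)).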
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
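# Space group 203 (F d -3 :2): 96 symmetry operations (origin choice 2).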
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
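# Space group 204 (I m -3): 48 symmetry operations (24 plus their
# body-centred images).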
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
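# Illustrative sketch only, kept as a comment so the generated table itself is
# not altered: each entry appended to `transformations` is a tuple
# (rot, trans_num, trans_den), and a symmetry-equivalent fractional coordinate
# can be obtained by applying rot and then adding trans_num/trans_den.  The
# names `x` and `x_image` below are hypothetical; how the SpaceGroup class
# consumes these tuples is assumed rather than shown here.
#
#   rot, trans_num, trans_den = transformations[0]
#   x = N.array([0.1, 0.2, 0.3])                      # fractional coordinates
#   x_image = N.dot(rot, x) + 1.0 * trans_num / trans_den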
transformations = []
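# Space group 205 (P a -3)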
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
transformations = []
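# Space group 206 (I a -3)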
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
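# The operations above are repeated below with the I-centring translation
# (1/2, 1/2, 1/2) added to each translation part.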
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
transformations = []
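# Space group 207 (P 4 3 2)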
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
transformations = []
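# Space group 208 (P 42 3 2)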
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
transformations = []
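# Space group 209 (F 4 3 2)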
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
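# The 24 operations above are repeated below with the F-centring translation
# (0, 1/2, 1/2) added to each translation part.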
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
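# The same 24 operations, now with the F-centring translation (1/2, 0, 1/2) added.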
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
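# The same 24 operations, now with the F-centring translation (1/2, 1/2, 0) added.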
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
transformations = []
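# Space group 210 (F 41 3 2)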
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
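# The 24 operations above are repeated below with the F-centring translation
# (0, 1/2, 1/2) added to each translation part.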
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
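# The same 24 operations, now with the F-centring translation (1/2, 0, 1/2) added.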
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
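# The same 24 operations, now with the F-centring translation (1/2, 1/2, 0) added.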
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
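# Illustrative helper, a minimal sketch of how a transformation tuple is used:
# each entry appended to `transformations` is (rot, trans_num, trans_den),
# encoding the operation x -> rot.x + trans_num/trans_den on fractional
# coordinates. It assumes `N` is the array module imported at the top of this
# file; the helper name `_apply_symmetry_operation` is illustrative only.
def _apply_symmetry_operation(rot, trans_num, trans_den, frac_coords):
    """Map a fractional-coordinate triple through one symmetry operation."""
    translation = (1.0 * trans_num) / trans_den  # elementwise float division
    return N.dot(rot, frac_coords) + translation

# Space group 212, 'P 43 3 2' (operations accumulated below).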
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
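# Space group 213, 'P 41 3 2' (operations accumulated below).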
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
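# Space group 214, 'I 41 3 2' (operations accumulated below).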
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
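# I-centering copies: the operations below repeat the set above shifted by
# (+1/2, +1/2, +1/2); some resulting translations are written unreduced
# (e.g. 5/4 rather than 1/4).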
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(214, 'I 41 3 2', transformations)
space_groups[214] = sg
space_groups['I 41 3 2'] = sg
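# Space group 215, 'P -4 3 m' (operations accumulated below).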
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(215, 'P -4 3 m', transformations)
space_groups[215] = sg
space_groups['P -4 3 m'] = sg
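# Space group 216, 'F -4 3 m' (operations accumulated below).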
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
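# F-centering copies: the operations below repeat the base set shifted by
# (0, +1/2, +1/2).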
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
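# F-centering copies shifted by (+1/2, 0, +1/2).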
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
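# F-centering copies shifted by (+1/2, +1/2, 0).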
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(216, 'F -4 3 m', transformations)
space_groups[216] = sg
space_groups['F -4 3 m'] = sg
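# Space group 217, 'I -4 3 m' (operations accumulated below).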
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
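# I-centering copies: the operations below repeat the set above shifted by
# (+1/2, +1/2, +1/2).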
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
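# Space group 218, 'P -4 3 n' (operations accumulated below).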
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(218, 'P -4 3 n', transformations)
space_groups[218] = sg
space_groups['P -4 3 n'] = sg
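# Next space group block (presumably 219, 'F -4 3 c', following the file's
# sequential numbering).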
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
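# Centering copies: the operations below repeat this block's base set shifted
# by (0, +1/2, +1/2).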
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(219, 'F -4 3 c', transformations)
space_groups[219] = sg
space_groups['F -4 3 c'] = sg
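# Editorial sketch: each tuple appended to `transformations` above is
# (3x3 rotation matrix, translation numerator, translation denominator), so the
# fractional translation is trans_num / trans_den. The helper below is purely
# illustrative (its name is made up and it is not part of this module or of the
# SpaceGroup class); it assumes the conventional mapping x' = R.x + t of a point
# given in fractional coordinates, with N being the array module used throughout
# this file.
def _example_apply_symmetry_op(rot, trans_num, trans_den, frac_pos):
    # frac_pos: length-3 sequence of fractional coordinates, e.g. [0.5, 0.0, 0.25]
    translation = trans_num * 1.0 / trans_den  # elementwise fractional shift
    return N.dot(rot, frac_pos) + translation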
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(220, 'I -4 3 d', transformations)
space_groups[220] = sg
space_groups['I -4 3 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(221, 'P m -3 m', transformations)
space_groups[221] = sg
space_groups['P m -3 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(222, 'P n -3 n :2', transformations)
space_groups[222] = sg
space_groups['P n -3 n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(223, 'P m -3 n', transformations)
space_groups[223] = sg
space_groups['P m -3 n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Project
# Licensed under the MIT License
import warnings
import pytest
import numpy as np
from copy import deepcopy
import os
import sys
import shutil
from scipy import constants, interpolate
from pyuvdata import UVCal, UVData
from hera_sim.interpolators import Beam
from hera_sim import DATA_PATH as HS_DATA_PATH
from hera_sim import noise
from uvtools import dspec
from hera_cal import io, datacontainer
from hera_cal import vis_clean
from hera_cal.vis_clean import VisClean
from hera_cal.data import DATA_PATH
from hera_cal import frf
import glob
import copy
# test flagging utility functions
def test_truncate_flagged_edges():
Nfreqs = 64
Ntimes = 60
data_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in = np.abs(data_in).astype(float)
data_in = data_in + .3j * data_in
# flag channel 30
weights_in[:, 30] = 0.
# flag last channel
weights_in[:, -1] = 0.
# flag last two integrations
weights_in[-2:, :] = 0.
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
# test freq truncation
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, freqs, ax='freq')
assert np.all(np.isclose(xout, freqs[:-1]))
assert np.all(np.isclose(dout, data_in[:, :-1]))
assert np.all(np.isclose(wout, weights_in[:, :-1]))
assert edges == [(0, 1)]
# test time truncation
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, times, ax='time')
assert np.all(np.isclose(xout, times[:-2]))
assert np.all(np.isclose(dout, data_in[:-2, :]))
assert np.all(np.isclose(wout, weights_in[:-2, :]))
assert edges == [(0, 2)]
# test truncating both.
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, (times, freqs), ax='both')
assert np.all(np.isclose(xout[0], times[:-2]))
assert np.all(np.isclose(xout[1], freqs[:-1]))
assert np.all(np.isclose(dout, data_in[:-2, :-1]))
assert np.all(np.isclose(wout, weights_in[:-2, :-1]))
assert edges == [[(0, 2)], [(0, 1)]]
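# Editorial note, based on the asserts above: the `edges` value returned by
# truncate_flagged_edges records how many fully flagged rows or channels were
# trimmed from the (start, end) of each axis, e.g. (0, 1) means nothing was cut at
# the low end and one channel was cut at the high end, and ax='both' nests the
# per-axis lists as [[time_edges], [freq_edges]]. restore_flagged_edges, tested
# next, is expected to undo the truncation, re-inserting the trimmed rows and
# channels with zero weight.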
def test_restore_flagged_edges():
Nfreqs = 64
Ntimes = 60
data_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in = np.abs(data_in).astype(float)
data_in = data_in + .3j * data_in
# flag channel 30
weights_in[:, 30] = 0.
# flag last channel
weights_in[:, -1] = 0.
# flag last two integrations
weights_in[-2:, :] = 0.
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
# test freq truncation
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, freqs, ax='freq')
wrest = vis_clean.restore_flagged_edges(xout, wout, edges)
assert np.allclose(weights_in[:, :-1], wrest[:, :-1])
assert np.allclose(wrest[:, -1], 0.0)
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, times, ax='time')
wrest = vis_clean.restore_flagged_edges(xout, wout, edges, ax='time')
assert np.allclose(wout, wrest[:-2, :])
assert np.allclose(wrest[-2:, :], 0.0)
xout, dout, wout, edges, _ = vis_clean.truncate_flagged_edges(data_in, weights_in, (times, freqs), ax='both')
wrest = vis_clean.restore_flagged_edges(xout, wout, edges, ax='both')
assert np.allclose(wrest[-2:, :], 0.0)
assert np.allclose(wrest[:, -1], 0.0)
assert np.allclose(wout, wrest[:-2, :-1])
def test_find_discontinuity_edges():
assert vis_clean.find_discontinuity_edges([0, 1, 4, 9]) == [(0, 2), (2, 3), (3, 4)]
assert vis_clean.find_discontinuity_edges([0, 1, 2, 4, 5, 6, 7, 9, 11, 12]) == [(0, 3), (3, 7), (7, 8), (8, 10)]
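# Editorial sketch (not the hera_cal implementation): the asserts above are
# consistent with splitting a monotonically increasing axis into blocks of uniform
# spacing and returning (start, stop) index pairs per block. A minimal version,
# assuming the nominal spacing is the smallest difference present:
def _example_find_discontinuity_edges(x):
    x = np.asarray(x, dtype=float)
    dx = np.min(np.diff(x))  # nominal sample spacing
    breaks = np.where(~np.isclose(np.diff(x), dx))[0] + 1
    starts = np.concatenate(([0], breaks))
    stops = np.concatenate((breaks, [len(x)]))
    return [(int(b), int(e)) for b, e in zip(starts, stops)]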
def test_flag_rows_with_flags_within_edge_distance():
Nfreqs = 64
Ntimes = 60
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in[32, 2] = 0.
weights_in[33, 12] = 0.
weights_in[2, 30] = 0.
weights_in[-10, 20] = 0.
freqs = np.arange(Nfreqs) * 100e3
    # under the above flagging pattern,
    # freq flagging with min_flag_edge_distance=3 should flag only the 32nd integration.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=3, ax='freq')
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
    # extending the edge distance to 13 should yield the 33rd integration being flagged as well.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=13, ax='freq')
for i in range(wout.shape[0]):
if i == 32 or i == 33:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
# now do time axis. 30th channel should be flagged for this case.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=3, ax='time')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# 30th and 20th channels should end up flagged for this case.
times = np.arange(Ntimes) * 10.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(times, weights_in, min_flag_edge_distance=11, ax='time')
for i in range(wout.shape[1]):
if i == 30 or i == 20:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# now do both
wout = vis_clean.flag_rows_with_flags_within_edge_distance([times, freqs], weights_in, min_flag_edge_distance=(3, 3), ax='both')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
def test_flag_rows_with_flags_within_edge_distance_with_breaks():
Nfreqs = 64
Ntimes = 60
freqs = np.hstack([np.arange(23), 30 + np.arange(24), 58 + np.arange(17)]) * 100e3 + 150e6 # freq axis with discontinuities at 23 and 47 integrations.
    times = np.hstack([np.arange(20) * 11., 41 * 11. + np.arange(27) * 11., 200 * 11. + np.arange(13) * 11.]) # time axis with discontinuities at 20 and 47 integrations
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
# frequency direction and time direction separately.
weights_in[2, 30] = 0. # time 2 should not get flagged
weights_in[21, 48] = 0. # time 21 should get flagged
weights_in[55, 46] = 0. # time 55 should get flagged
weights_in[25, -2] = 0. # time 25 should get flagged
wout = vis_clean.flag_rows_with_flags_within_edge_distance(freqs, weights_in, min_flag_edge_distance=3, ax='freq')
assert list(np.where(np.all(np.isclose(wout, 0.), axis=1))[0]) == [21, 25, 55]
weights_in[22, 30] = 0. # channel 30 should be flagged
# channel 48 will also be flagged.
wout = vis_clean.flag_rows_with_flags_within_edge_distance(times, weights_in, min_flag_edge_distance=3, ax='time')
assert list(np.where(np.all(np.isclose(wout, 0.), axis=0))[0]) == [30, 48]
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
# both directions
    weights_in[22, 30] = 0. # time 22 should not get flagged
weights_in[55, 46] = 0. # time 55 should get flagged
weights_in[25, -2] = 0. # time 25 should get flagged
weights_in[22, 30] = 0. # channel 30 should be flagged
wout = vis_clean.flag_rows_with_flags_within_edge_distance([times, freqs], weights_in, min_flag_edge_distance=[2, 3], ax='both')
assert list(np.where(np.all(np.isclose(wout, 0.), axis=0))[0]) == [30]
assert list(np.where(np.all(np.isclose(wout, 0.), axis=1))[0]) == [25, 55]
def test_flag_rows_with_contiguous_flags():
Nfreqs = 64
Ntimes = 60
weights_in = np.outer(np.arange(1, Ntimes + 1), np.arange(1, Nfreqs + 1))
weights_in[32, 2:12] = 0.
weights_in[35, 12:14] = 0.
weights_in[2:12, 30] = 0.
weights_in[-10:-8, 20] = 0.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=8, ax='freq')
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
    # lowering max_contiguous_flag to 2 should yield the 35th integration being flagged as well.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=2, ax='freq')
for i in range(wout.shape[0]):
if i == 32 or i == 35:
assert np.all(np.isclose(wout[i], 0.0))
else:
assert np.all(np.isclose(wout[i], weights_in[i]))
# now do time axis. 30th channel should be flagged for this case.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=8, ax='time')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# 30th and 20th channels should end up flagged for this case.
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=2, ax='time')
for i in range(wout.shape[1]):
if i == 30 or i == 20:
assert np.all(np.isclose(wout[:, i], 0.0))
else:
assert np.all(np.isclose(wout[:, i], weights_in[:, i]))
# now do both
wout = vis_clean.flag_rows_with_contiguous_flags(weights_in, max_contiguous_flag=(3, 3), ax='both')
for i in range(wout.shape[1]):
if i == 30:
assert np.all(np.isclose(wout[:, i], 0.0))
for i in range(wout.shape[0]):
if i == 32:
assert np.all(np.isclose(wout[i], 0.0))
def test_get_max_contiguous_flag_from_filter_periods():
Nfreqs = 64
Ntimes = 60
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
filter_centers = [[0.], [0.]]
filter_half_widths = [[1 / (3. * 10)], [1 / (100e3 * 2)]]
mcf = vis_clean.get_max_contiguous_flag_from_filter_periods(freqs, filter_centers[1], filter_half_widths[1])
assert mcf == 2
mcf = vis_clean.get_max_contiguous_flag_from_filter_periods(times, filter_centers[0], filter_half_widths[0])
assert mcf == 3
mcf = vis_clean.get_max_contiguous_flag_from_filter_periods((times, freqs), filter_centers, filter_half_widths)
assert tuple(mcf) == (3, 2)
# test assertion errors
pytest.raises(ValueError, vis_clean.get_max_contiguous_flag_from_filter_periods, [1.], [0.], [.5])
pytest.raises(ValueError, vis_clean.get_max_contiguous_flag_from_filter_periods, [[1.], [0.]], [[0.], [0.]], [[.5], [.5]])
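# Editorial note: the expected values above are consistent with reading the
# maximum contiguous flag as the filter "period" implied by each half-width,
# expressed in samples, roughly 1. / (filter_half_width * sample_spacing):
# 1. / ((1 / (3. * 10)) * 10.) gives 3 for the time axis and
# 1. / ((1 / (100e3 * 2)) * 100e3) gives 2 for the frequency axis.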
def test_flag_model_rms():
Nfreqs = 64
Ntimes = 60
times = np.arange(60) * 10.
freqs = np.arange(64) * 100e3
w = np.ones((Ntimes, Nfreqs), dtype=bool)
d = np.random.randn(Ntimes, Nfreqs) * 1e-3 + 1j * np.random.randn(Ntimes, Nfreqs) * 1e-3
d += np.ones_like(d) * 100
d[30, 12] = 3.12315132e6
w[30, 12] = 0.
mdl = np.ones_like(d) * 100
mdl[30, 24] = 1e6
skipped = np.zeros_like(mdl, dtype=bool)
skipped = vis_clean.flag_model_rms(skipped, d, w, mdl, ax='freq')
for i in range(Ntimes):
if i == 30:
assert np.all(skipped[i])
else:
assert np.all(~skipped[i])
skipped = np.zeros_like(mdl, dtype=bool)
skipped = vis_clean.flag_model_rms(skipped, d, w, mdl, ax='time')
for i in range(Ntimes):
if i == 24:
assert np.all(skipped[:, i])
else:
assert np.all(~skipped[:, i])
skipped = np.zeros_like(mdl, dtype=bool)
skipped = vis_clean.flag_model_rms(skipped, d, w, mdl, ax='both')
for i in range(Nfreqs):
if i == 24:
assert np.all(skipped[:, i])
else:
assert ~np.all(skipped[:, i])
for i in range(Ntimes):
if i == 30:
assert np.all(skipped[i])
else:
assert ~np.all(skipped[i])
@pytest.mark.filterwarnings("ignore:The default for the `center` keyword has changed")
@pytest.mark.filterwarnings("ignore:It seems that the latitude and longitude are in radians")
class Test_VisClean(object):
def test_init(self):
# test basic init
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
assert not hasattr(V, 'data')
V.read(bls=[(24, 25, 'ee')])
assert hasattr(V, 'data')
assert hasattr(V, 'antpos')
assert isinstance(V.hd, io.HERAData)
assert isinstance(V.hd.data_array, np.ndarray)
# test basic init w/ uvh5
fname = os.path.join(DATA_PATH, 'zen.2458098.43124.subband.uvh5')
V = VisClean(fname, filetype='uvh5')
assert not hasattr(V, 'data')
V.read(bls=[(13, 14, 'ee')])
assert set(V.hd.ant_1_array) == set([13])
assert isinstance(V.hd, io.HERAData)
assert isinstance(V.hd.data_array, np.ndarray)
# test input cal
fname = os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA')
uvc = io.HERACal(os.path.join(DATA_PATH, 'zen.2458043.12552.xx.HH.uvORA.abs.calfits'))
gains, _, _, _ = uvc.read()
V1 = VisClean(fname, filetype='miriad')
bl = (52, 53, 'ee')
V1.read(bls=[bl])
V2 = VisClean(fname, filetype='miriad', input_cal=uvc)
V2.read(bls=[bl])
g = gains[(bl[0], 'Jee')] * gains[(bl[1], 'Jee')].conj()
assert np.allclose((V1.data[bl] / g)[30, 30], V2.data[bl][30, 30])
V2.apply_calibration(V2.hc, unapply=True)
assert np.allclose(V1.data[bl][30, 30], V2.data[bl][30, 30], atol=1e-5)
# test soft copy
V1.hello = 'hi'
V1.hello_there = 'bye'
V1.foo = 'bar'
V3 = V1.soft_copy(references=["hello*"])
assert hex(id(V1.data[(52, 53, 'ee')])) == hex(id(V3.data[(52, 53, 'ee')]))
assert hasattr(V3, 'hello')
assert hasattr(V3, 'hello_there')
assert not hasattr(V3, 'foo')
assert V3.__class__ == VisClean
# test clear
V1.clear_containers()
assert np.all([len(getattr(V1, c)) == 0 for c in ['data', 'flags', 'nsamples']])
V2.clear_calibration()
assert not hasattr(V2, 'hc')
@pytest.mark.filterwarnings("ignore:Selected polarization values are not evenly spaced")
def test_read_write(self):
# test read data can be turned off for uvh5
fname = os.path.join(DATA_PATH, 'zen.2458098.43124.subband.uvh5')
V = VisClean(fname, filetype='uvh5')
V.read(read_data=False)
assert set(V.hd.ant_1_array) == set([1, 11, 12, 13, 14])
# test read-write-read
V.read()
V.write_data(V.data, "./ex.uvh5", overwrite=True, filetype='uvh5', extra_attrs=dict(vis_units='Jy'))
V2 = VisClean("./ex.uvh5", filetype='uvh5')
V2.read()
assert V2.hd.vis_units == 'Jy'
assert 'Thisfilewasproducedbythefunction' in V2.hd.history.replace('\n', '').replace(' ', '')
V.hd.history, V2.hd.history, V2.hd.vis_units = '', '', V.hd.vis_units
if hasattr(V.hd, "filename"):
# make sure filename attributes are what we're expecting
assert V.hd.filename == ["zen.2458098.43124.subband.uvh5"]
assert V2.hd.filename == ["ex.uvh5"]
V.hd.filename = V2.hd.filename
assert V.hd == V2.hd
os.remove("./ex.uvh5")
# exceptions
pytest.raises(ValueError, V.write_data, V.data, 'foo', filetype='what')
# test write on subset of data
V.read(read_data=True)
data = datacontainer.DataContainer(dict([(k, V.data[k]) for k in list(V.data.keys())[:2]]))
V.write_data(data, "ex.uvh5", overwrite=True, filetype='uvh5')
assert os.path.exists("ex.uvh5")
os.remove('ex.uvh5')
def test_fourier_filter(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# test arg errors
k = (24, 25, 'ee')
fc = [0.]
fw = [100e-9]
ff = [1e-9]
fwt = [1e-3]
assert pytest.raises(ValueError, V.fourier_filter, keys=[k], overwrite=True,
filter_centers=fc, filter_half_widths=fw, suppression_factors=ff,
ax='height', mode='dayenu', fitting_options=None)
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fw, suppression_factors=ff,
ax='freq', mode='dayenu', output_prefix='clean', zeropad=10, overwrite=True, max_contiguous_edge_flags=20)
# this line is repeated to cover the overwrite skip
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fw, suppression_factors=ff, max_contiguous_edge_flags=20,
ax='freq', mode='dayenu', zeropad=10, output_prefix='clean', overwrite=False)
assert np.all([V.clean_info[k][(0, V.Nfreqs)]['status']['axis_1'][i] == 'success' for i in V.clean_info[k][(0, V.Nfreqs)]['status']['axis_1']])
# now do a time filter
V.fourier_filter(keys=[k], filter_centers=fc, filter_half_widths=fwt, suppression_factors=ff, overwrite=True,
ax='time', mode='dayenu', zeropad=10, max_contiguous_edge_flags=20)
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], rtol=0., atol=atol))
# raise errors.
assert pytest.raises(ValueError, V.fourier_filter, filter_centers=[fc, fc], ax='both',
filter_half_widths=[fwt, fw], suppression_factors=[ff, ff],
mode='dayenu', zeropad=0, overwrite=True)
assert pytest.raises(ValueError, V.fourier_filter, filter_centers=[fc, fc], ax='both',
filter_half_widths=[fwt, fw], suppression_factors=[ff, ff], overwrite=True,
mode='dayenu', zeropad=['Mathematical Universe', 'Crazy Universe'])
# check 2d filter.
V.fourier_filter(filter_centers=[fc, fc],
filter_half_widths=[fwt, fw],
suppression_factors=[ff, ff],
mode='dayenu', overwrite=True,
zeropad=[20, 10], ax='both', max_contiguous_edge_flags=100)
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.allclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], rtol=0., atol=atol)
@pytest.mark.filterwarnings("ignore:.*dspec.vis_filter will soon be deprecated")
def test_vis_clean_dayenu(self):
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# most coverage is in dspec. Check that args go through here.
# similar situation for test_vis_clean.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, mode='dayenu')
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
        # atol is set to 1e-6 times the rms of the unflagged data so the comparison passes on CI
        # (travis); tighter tolerances fail there due to platform-dependent numerical precision.
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.))
assert np.all([V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] == 'success' for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
assert pytest.raises(AssertionError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate=None, mode='dayenu')
assert pytest.raises(ValueError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate='arglebargle', mode='dayenu')
# cover no overwrite = False skip lines.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=False, mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, mode='dayenu')
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.allclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.)
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dayenu')
assert np.all(['success' == V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.allclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.)
# check whether dayenu filtering axis 1 and then axis 0 is the same as dayenu filtering axis 1 and then filtering the resid.
# note that filtering axis orders do not commute, we filter axis 1 (foregrounds) before filtering cross-talk.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, max_frate=1.0, output_prefix='clean1', mode='dayenu')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, data=V.clean1_resid, output_prefix='clean0', mode='dayenu')
assert np.all(np.isclose(V.clean_resid[(24, 25, 'ee')], V.clean0_resid[(24, 25, 'ee')]))
@pytest.mark.filterwarnings("ignore:.*dspec.vis_filter will soon be deprecated")
def test_vis_clean_dpss(self):
        # Relax atol to 1e-6 for the clean_data vs data equality checks; there appear to be
        # numerical precision issues at play (notebook tests show that distributing minus signs
        # changes results at this level).
fname = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
V = VisClean(fname, filetype='uvh5')
V.read()
# most coverage is in dspec. Check that args go through here.
# similar situation for test_vis_clean.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=True, mode='dpss_leastsq')
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.))
assert np.all([V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] == 'success' for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
assert pytest.raises(AssertionError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', mode='dpss_leastsq')
assert pytest.raises(ValueError, V.vis_clean, keys=[(24, 25, 'ee')], ax='time', max_frate='arglebargle', mode='dpss_leastsq')
# cover no overwrite = False skip lines.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='freq', overwrite=False, mode='dpss_leastsq')
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='time', overwrite=True, max_frate=1.0, mode='dpss_leastsq')
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][0] == 'skipped'
assert V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_0'][3] == 'success'
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.))
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax='both', overwrite=True, max_frate=1.0, mode='dpss_leastsq')
assert np.all(['success' == V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1'][i] for i in V.clean_info[(24, 25, 'ee')][(0, V.Nfreqs)]['status']['axis_1']])
# check that clean resid is equal to zero in flagged channels
assert np.all(V.clean_resid[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_resid[(24, 25, 'ee')][~(V.clean_flags[(24, 25, 'ee')] | V.flags[(24, 25, 'ee')])] != 0.)
assert np.all(V.clean_model[(24, 25, 'ee')][V.clean_flags[(24, 25, 'ee')]] == 0.)
assert np.any(V.clean_model[(24, 25, 'ee')][~V.clean_flags[(24, 25, 'ee')]] != 0.)
# check that filtered_data is the same in channels that were not flagged
atol = 1e-6 * np.mean(np.abs(V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')]]) ** 2.) ** .5
assert np.all(np.isclose(V.clean_data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]],
V.data[(24, 25, 'ee')][~V.flags[(24, 25, 'ee')] & ~V.clean_flags[(24, 25, 'ee')]], atol=atol, rtol=0.))
# run with flag_model_rms_outliers
for ax in ['freq', 'time', 'both']:
for k in V.flags:
V.flags[k][:] = False
V.data[k][:] = np.random.randn(*V.data[k].shape) + 1j * np.random.randn(*V.data[k].shape)
# run with rms threshold < 1 which should lead to everything being flagged.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax=ax, overwrite=True,
max_frate=1.0, mode='dpss_leastsq', flag_model_rms_outliers=True, model_rms_threshold=0.1)
for k in [(24, 25, 'ee'), (24, 25, 'ee')]:
assert np.all(V.clean_flags[k])
# now use a threshold which should not lead to any flags.
V.vis_clean(keys=[(24, 25, 'ee'), (24, 25, 'ee')], ax=ax, overwrite=True,
max_frate=1.0, mode='dpss_leastsq', flag_model_rms_outliers=True, model_rms_threshold=1e6)
for k in [(24, 25, 'ee'), (24, 25, 'ee')]:
assert not np.any(V.clean_flags[k])
def test_vis_clean_flag_options(self, tmpdir):
# tests for time and frequency partial flagging.
tmp_path = tmpdir.strpath
template = os.path.join(DATA_PATH, "zen.2458043.40141.xx.HH.XRAA.uvh5")
# first run flagging channels and frequencies
fname_edgeflags = os.path.join(tmp_path, "zen.2458043.40141.xx.HH.XRAA.edgeflags.uvh5")
fname_flagged = os.path.join(tmp_path, "zen.2458043.40141.xx.HH.XRAA.allflags.uvh5")
hdt = io.HERAData(template)
d, f, n = hdt.read()
for k in d:
f[k][:] = False
f[k][:, 0] = True
f[k][0, :] = True
hdt.update(flags=f)
hdt.write_uvh5(fname_edgeflags)
for k in d:
f[k][:] = True
hdt.update(flags=f)
hdt.write_uvh5(fname_flagged)
V = VisClean(fname_flagged, filetype='uvh5')
V.read()
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='freq', overwrite=True,
skip_flagged_edges=True)
        # make sure that if no unflagged channels exist, the clean flags are entirely set.
for k in V.clean_flags:
assert np.all(V.clean_flags[k])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='freq', overwrite=True,
skip_contiguous_flags=True)
for k in V.clean_flags:
assert np.all(V.clean_flags[k])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='time', overwrite=True,
skip_contiguous_flags=True, max_frate=0.025)
for k in V.clean_flags:
assert np.all(V.clean_flags[k])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='both', overwrite=True,
skip_contiguous_flags=True, max_frate=0.025)
for k in V.clean_flags:
assert np.all(V.clean_flags[k])
# now do file with some edge flags. Make sure the edge flags remain in clean_flags.
V = VisClean(fname_edgeflags, filetype='uvh5')
V.read()
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='freq', overwrite=True,
skip_flagged_edges=True)
for k in V.clean_flags:
if not np.all(V.flags[k]):
assert not np.all(V.clean_flags[k])
assert np.all(V.clean_flags[k][0])
assert np.all(V.clean_flags[k][:, 0])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='time', overwrite=True,
skip_flagged_edges=True, max_frate=0.025)
for k in V.clean_flags:
if not np.all(V.flags[k]):
assert not np.all(V.clean_flags[k])
assert np.all(V.clean_flags[k][0])
assert np.all(V.clean_flags[k][:, 0])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='both', overwrite=True,
skip_flagged_edges=True, max_frate=0.025)
for k in V.clean_flags:
if not np.all(V.flags[k]):
assert not np.all(V.clean_flags[k])
assert np.all(V.clean_flags[k][0])
assert np.all(V.clean_flags[k][:, 0])
        # now try skipping based on contiguous flag gaps set by the filter periods.
standoff = 1e9 / (np.median(np.diff(V.freqs)))
max_frate = datacontainer.DataContainer({(24, 25, 'ee'): 2. / np.abs(np.median(np.diff(V.times)) * 3.6 * 24.),
(24, 24, 'ee'): 1. / np.abs(2 * np.median(np.diff(V.times)) * 3.6 * 24.)})
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='freq', overwrite=True,
skip_contiguous_flags=True, standoff=standoff)
# with this standoff, all data should be skipped.
assert np.all(V.clean_flags[(24, 25, 'ee')])
assert np.all(V.clean_flags[(24, 24, 'ee')])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='time', overwrite=True,
skip_contiguous_flags=True, max_frate=max_frate)
# this time, should only skip (24, 25, 'ee')
assert np.all(V.clean_flags[(24, 25, 'ee')])
assert not np.all(V.clean_flags[(24, 24, 'ee')])
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='both', overwrite=True,
skip_contiguous_flags=True, max_frate=max_frate, standoff=standoff)
        # now test skipping integrations with a flag within the edge distance.
        # these flags should cause integration 12 to be
        # completely skipped when the filtering mode is "both".
for k in [(24, 25, 'ee'), (24, 24, 'ee')]:
V.flags[k][:] = False
V.flags[k][12, 0] = True
V.flags[k][-1, 32] = True
V.vis_clean(keys=[(24, 25, 'ee'), (24, 24, 'ee')], ax='both', overwrite=True,
max_frate=0.025, standoff=0.0, min_dly=50.,
skip_if_flag_within_edge_distance=(2, 2), mode='dpss_leastsq')
for k in [(24, 25, 'ee'), (24, 24, 'ee')]:
for i in range(V.Ntimes):
if i == 12:
assert V.clean_info[k][(0, V.Nfreqs)]['status']['axis_1'][i] == 'skipped'
else:
                    assert not np.any(V.clean_flags[k][i])
# --------------------------------------------------------
# Tensorflow TIN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
"""
Generating training instance
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import json
import pickle
import random
from random import randint
import tensorflow as tf
import cv2
# from config import cfg  # fails because of "from __future__ import absolute_import"
from ult import config  # use an absolute import and refer to config.cfg
list_no_inter = [10,24,31,46,54,65,76,86,92,96,107,111,
129,146,160,170,174,186,194,198,208,214,224,232,235,239,
243,247,252,257,264,273,283,290,295,305,313,325,330,336,
342,348,352,356,363,368,376,383,389,393,397,407,414,418,
429,434,438,445,449,453,463,474,483,488,502,506,516,528,
533,538,546,550,558,562,567,576,584,588,595,600]
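# Note (descriptive, inferred from usage): list_no_inter above appears to hold the 1-based
# indices of the 80 "no interaction" HOI classes in HICO-DET (one per object category), and
# OBJECT_MASK below appears to mark, for each object category (rows), which of the 29 V-COCO
# action classes (columns) are applicable (1) or not (0).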
OBJECT_MASK = [
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1],
[0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0],
[1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0]
]
print("*************data path:*************")
print(config.cfg.DATA_DIR)
print("************************************")
def bbox_trans(human_box_ori, object_box_ori, ratio, size = 64):
human_box = human_box_ori.copy()
object_box = object_box_ori.copy()
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]), max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
height = InteractionPattern[3] - InteractionPattern[1] + 1
width = InteractionPattern[2] - InteractionPattern[0] + 1
if height > width:
ratio = 'height'
else:
ratio = 'width'
# shift the top-left corner to (0,0)
human_box[0] -= InteractionPattern[0]
human_box[2] -= InteractionPattern[0]
human_box[1] -= InteractionPattern[1]
human_box[3] -= InteractionPattern[1]
object_box[0] -= InteractionPattern[0]
object_box[2] -= InteractionPattern[0]
object_box[1] -= InteractionPattern[1]
object_box[3] -= InteractionPattern[1]
if ratio == 'height': # height is larger than width
human_box[0] = 0 + size * human_box[0] / height
human_box[1] = 0 + size * human_box[1] / height
human_box[2] = (size * width / height - 1) - size * (width - 1 - human_box[2]) / height
human_box[3] = (size - 1) - size * (height - 1 - human_box[3]) / height
object_box[0] = 0 + size * object_box[0] / height
object_box[1] = 0 + size * object_box[1] / height
object_box[2] = (size * width / height - 1) - size * (width - 1 - object_box[2]) / height
object_box[3] = (size - 1) - size * (height - 1 - object_box[3]) / height
# Need to shift horizontally
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]), max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
#assert (InteractionPattern[0] == 0) & (InteractionPattern[1] == 0) & (InteractionPattern[3] == 63) & (InteractionPattern[2] <= 63)
if human_box[3] > object_box[3]:
human_box[3] = size - 1
else:
object_box[3] = size - 1
shift = size / 2 - (InteractionPattern[2] + 1) / 2
human_box += [shift, 0 , shift, 0]
object_box += [shift, 0 , shift, 0]
else: # width is larger than height
human_box[0] = 0 + size * human_box[0] / width
human_box[1] = 0 + size * human_box[1] / width
human_box[2] = (size - 1) - size * (width - 1 - human_box[2]) / width
human_box[3] = (size * height / width - 1) - size * (height - 1 - human_box[3]) / width
object_box[0] = 0 + size * object_box[0] / width
object_box[1] = 0 + size * object_box[1] / width
object_box[2] = (size - 1) - size * (width - 1 - object_box[2]) / width
object_box[3] = (size * height / width - 1) - size * (height - 1 - object_box[3]) / width
# Need to shift vertically
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]), max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
#assert (InteractionPattern[0] == 0) & (InteractionPattern[1] == 0) & (InteractionPattern[2] == 63) & (InteractionPattern[3] <= 63)
if human_box[2] > object_box[2]:
human_box[2] = size - 1
else:
object_box[2] = size - 1
shift = size / 2 - (InteractionPattern[3] + 1) / 2
human_box = human_box + [0, shift, 0 , shift]
object_box = object_box + [0, shift, 0 , shift]
return np.round(human_box), np.round(object_box)
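# bbox_trans maps the joint human/object box (the "interaction pattern") into a size x size
# window: the union box is shifted to the origin, scaled so its longer side spans the window,
# and the shorter side is centred; the rounded, transformed human and object boxes are returned.
# Note that the ratio argument passed by callers is ignored and recomputed from the box aspect.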
def Get_next_sp(human_box, object_box):
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]), max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
height = InteractionPattern[3] - InteractionPattern[1] + 1
width = InteractionPattern[2] - InteractionPattern[0] + 1
if height > width:
H, O = bbox_trans(human_box, object_box, 'height')
else:
H, O = bbox_trans(human_box, object_box, 'width')
Pattern = np.zeros((64,64,2))
Pattern[int(H[1]):int(H[3]) + 1,int(H[0]):int(H[2]) + 1,0] = 1
Pattern[int(O[1]):int(O[3]) + 1,int(O[0]):int(O[2]) + 1,1] = 1
return Pattern
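# Illustrative usage (hypothetical boxes): channel 0 holds the human occupancy map and
# channel 1 the object occupancy map, both rescaled into the 64x64 window, e.g.
#   pattern = Get_next_sp(np.array([10., 20., 110., 220.]), np.array([80., 60., 180., 160.]))
#   pattern.shape  # -> (64, 64, 2)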
def bb_IOU(boxA, boxB):
ixmin = np.maximum(boxA[0], boxB[0])
iymin = np.maximum(boxA[1], boxB[1])
ixmax = np.minimum(boxA[2], boxB[2])
iymax = np.minimum(boxA[3], boxB[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((boxB[2] - boxB[0] + 1.) * (boxB[3] - boxB[1] + 1.) +
(boxA[2] - boxA[0] + 1.) *
(boxA[3] - boxA[1] + 1.) - inters)
overlaps = inters / uni
return overlaps
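# Sanity checks (illustrative): identical boxes give an IoU of 1.0 and disjoint boxes give 0.0:
#   bb_IOU(np.array([0., 0., 9., 9.]), np.array([0., 0., 9., 9.]))      # -> 1.0
#   bb_IOU(np.array([0., 0., 9., 9.]), np.array([20., 20., 29., 29.]))  # -> 0.0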
def Augmented_box(bbox, shape, image_id, augment = 15, break_flag = True):
thres_ = 0.7
box = np.array([0, bbox[0], bbox[1], bbox[2], bbox[3]]).reshape(1,5)
box = box.astype(np.float64)
count = 0
time_count = 0
while count < augment:
time_count += 1
height = bbox[3] - bbox[1]
width = bbox[2] - bbox[0]
height_cen = (bbox[3] + bbox[1]) / 2
width_cen = (bbox[2] + bbox[0]) / 2
ratio = 1 + randint(-10,10) * 0.01
        height_shift = randint(-int(np.floor(height)), int(np.floor(height))) * 0.1  # randint requires integer bounds
        width_shift = randint(-int(np.floor(width)), int(np.floor(width))) * 0.1
H_0 = max(0, width_cen + width_shift - ratio * width / 2)
H_2 = min(shape[1] - 1, width_cen + width_shift + ratio * width / 2)
H_1 = max(0, height_cen + height_shift - ratio * height / 2)
H_3 = min(shape[0] - 1, height_cen + height_shift + ratio * height / 2)
if bb_IOU(bbox, np.array([H_0, H_1, H_2, H_3])) > thres_:
box_ = np.array([0, H_0, H_1, H_2, H_3]).reshape(1,5)
box = np.concatenate((box, box_), axis=0)
count += 1
if break_flag == True and time_count > 150:
return box
return box
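# Augmented_box returns rows of the form [0, x1, y1, x2, y2] (the leading 0 is the batch index
# expected downstream): the first row is the original box, followed by up to `augment` random
# jitters kept only when their IoU with the original exceeds thres_ (0.7); when break_flag is
# True it gives up after 150 attempts.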
def Generate_action(action_list):
action_ = np.zeros(29)
for GT_idx in action_list:
action_[GT_idx] = 1
action_ = action_.reshape(1,29)
return action_
def Generate_action_HICO(action_list):
action_ = np.zeros(600)
for GT_idx in action_list:
action_[GT_idx] = 1
action_ = action_.reshape(1,600)
return action_
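# Both helpers above build multi-hot row vectors: 29 classes for the V-COCO actions and
# 600 classes for the HICO-DET HOI categories.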
##############################################################################
##############################################################################
# tools added
##############################################################################
##############################################################################
def Generate_action_30(action_list):
action_ = np.zeros(30)
for GT_idx in action_list:
action_[GT_idx] = 1
action_ = action_.reshape(1,30)
return action_
def draw_relation(human_pattern, joints, size = 64):
joint_relation = [[1,3],[2,4],[0,1],[0,2],[0,17],[5,17],[6,17],[5,7],[6,8],[7,9],[8,10],[11,17],[12,17],[11,13],[12,14],[13,15],[14,16]]
color = [0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95]
skeleton = np.zeros((size, size, 1), dtype="float32")
for i in range(len(joint_relation)):
cv2.line(skeleton, tuple(joints[joint_relation[i][0]]), tuple(joints[joint_relation[i][1]]), (color[i]))
# cv2.rectangle(skeleton, (int(human_pattern[0]), int(human_pattern[1])), (int(human_pattern[2]), int(human_pattern[3])), (255))
# cv2.imshow("Joints", skeleton)
# cv2.waitKey(0)
# print(skeleton[:,:,0])
return skeleton
def get_skeleton(human_box, human_pose, human_pattern, num_joints = 17, size = 64):
width = human_box[2] - human_box[0] + 1
height = human_box[3] - human_box[1] + 1
pattern_width = human_pattern[2] - human_pattern[0] + 1
pattern_height = human_pattern[3] - human_pattern[1] + 1
joints = np.zeros((num_joints + 1, 2), dtype='int32')
for i in range(num_joints):
joint_x, joint_y, joint_score = human_pose[3 * i : 3 * (i + 1)]
x_ratio = (joint_x - human_box[0]) / float(width)
y_ratio = (joint_y - human_box[1]) / float(height)
joints[i][0] = min(size - 1, int(round(x_ratio * pattern_width + human_pattern[0])))
joints[i][1] = min(size - 1, int(round(y_ratio * pattern_height + human_pattern[1])))
joints[num_joints] = (joints[5] + joints[6]) / 2
return draw_relation(human_pattern, joints)
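# get_skeleton maps each COCO keypoint from image coordinates into the 64x64 interaction-pattern
# frame (scaling its offset within the human box to the transformed human box), appends an 18th
# "joint" at the midpoint of the two shoulders (keypoints 5 and 6), and rasterises the limb
# connections with distinct grayscale values via draw_relation.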
def Get_next_sp_with_pose(human_box, object_box, human_pose, num_joints=17):
InteractionPattern = [min(human_box[0], object_box[0]), min(human_box[1], object_box[1]), max(human_box[2], object_box[2]), max(human_box[3], object_box[3])]
height = InteractionPattern[3] - InteractionPattern[1] + 1
width = InteractionPattern[2] - InteractionPattern[0] + 1
if height > width:
H, O = bbox_trans(human_box, object_box, 'height')
else:
H, O = bbox_trans(human_box, object_box, 'width')
Pattern = np.zeros((64,64,2), dtype='float32')
Pattern[int(H[1]):int(H[3]) + 1,int(H[0]):int(H[2]) + 1,0] = 1
Pattern[int(O[1]):int(O[3]) + 1,int(O[0]):int(O[2]) + 1,1] = 1
    if human_pose is not None and len(human_pose) == 51:
skeleton = get_skeleton(human_box, human_pose, H, num_joints)
else:
skeleton = np.zeros((64,64,1), dtype='float32')
skeleton[int(H[1]):int(H[3]) + 1,int(H[0]):int(H[2]) + 1,0] = 0.05
Pattern = np.concatenate((Pattern, skeleton), axis=2)
return Pattern
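# Illustrative usage (hypothetical boxes, no pose available): without a 51-element keypoint list
# the third channel falls back to a faint (0.05) fill of the human box, e.g.
#   pattern = Get_next_sp_with_pose(np.array([10., 20., 110., 220.]),
#                                   np.array([80., 60., 180., 160.]), None)
#   pattern.shape  # -> (64, 64, 3)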
##############################################################################
# for vcoco with pose pattern
##############################################################################
def Get_Next_Instance_HO_Neg_pose_pattern_version2(trainval_GT, Trainval_Neg, iter, Pos_augment, Neg_select, Data_length):
GT = trainval_GT[iter%Data_length]
image_id = GT[0][0]
im_file = config.cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(12) + '.jpg'
im = cv2.imread(im_file)
im_orig = im.astype(np.float32, copy=True)
im_orig -= config.cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
Pattern, Human_augmented, Human_augmented_solo, Object_augmented, action_HO, action_H, mask_HO, mask_H, binary_label = Augmented_HO_Neg_pose_pattern_version2(GT, Trainval_Neg, im_shape, Pos_augment, Neg_select)
blobs = {}
blobs['image'] = im_orig
blobs['H_boxes_solo']= Human_augmented_solo
blobs['H_boxes'] = Human_augmented
blobs['O_boxes'] = Object_augmented
blobs['gt_class_HO'] = action_HO
blobs['gt_class_H'] = action_H
blobs['Mask_HO'] = mask_HO
blobs['Mask_H'] = mask_H
blobs['sp'] = Pattern
blobs['H_num'] = len(action_H)
blobs['binary_label'] = binary_label
return blobs
def Augmented_HO_Neg_pose_pattern_version2(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
image_id = GT[0][0]
GT_count = len(GT)
aug_all = int(Pos_augment / GT_count)
aug_last = Pos_augment - aug_all * (GT_count - 1)
Human_augmented, Object_augmented, action_HO, Human_augmented_solo, action_H = [], [], [], [], []
Pattern = np.empty((0, 64, 64, 3), dtype=np.float32)
for i in range(GT_count - 1):
Human = GT[i][2]
Object = GT[i][3]
Human_augmented_temp = Augmented_box(Human, shape, image_id, aug_all)
Object_augmented_temp = Augmented_box(Object, shape, image_id, aug_all)
Human_augmented_solo.extend(Human_augmented_temp)
length_min = min(len(Human_augmented_temp),len(Object_augmented_temp))
Human_augmented_temp = Human_augmented_temp[:length_min]
Object_augmented_temp = Object_augmented_temp[:length_min]
action_H__temp = Generate_action(GT[i][4])
action_H_temp = action_H__temp
for j in range(length_min - 1):
action_H_temp = np.concatenate((action_H_temp, action_H__temp), axis=0)
action_HO__temp = Generate_action(GT[i][1])
action_HO_temp = action_HO__temp
for j in range(length_min - 1):
action_HO_temp = np.concatenate((action_HO_temp, action_HO__temp), axis=0)
for j in range(length_min):
Pattern_ = Get_next_sp_with_pose(Human_augmented_temp[j][1:], Object_augmented_temp[j][1:], GT[i][5]).reshape(1, 64, 64, 3)
Pattern = np.concatenate((Pattern, Pattern_), axis=0)
Human_augmented.extend(Human_augmented_temp)
Object_augmented.extend(Object_augmented_temp)
action_HO.extend(action_HO_temp)
action_H.extend(action_H_temp)
Human = GT[GT_count - 1][2]
Object = GT[GT_count - 1][3]
Human_augmented_temp = Augmented_box(Human, shape, image_id, aug_last)
Object_augmented_temp = Augmented_box(Object, shape, image_id, aug_last)
Human_augmented_solo.extend(Human_augmented_temp)
length_min = min(len(Human_augmented_temp),len(Object_augmented_temp))
Human_augmented_temp = Human_augmented_temp[:length_min]
Object_augmented_temp = Object_augmented_temp[:length_min]
action_H__temp = Generate_action(GT[GT_count - 1][4])
action_H_temp = action_H__temp
for j in range(length_min - 1):
action_H_temp = np.concatenate((action_H_temp, action_H__temp), axis=0)
action_HO__temp = Generate_action(GT[GT_count - 1][1])
action_HO_temp = action_HO__temp
for j in range(length_min - 1):
action_HO_temp = np.concatenate((action_HO_temp, action_HO__temp), axis=0)
for j in range(length_min):
Pattern_ = Get_next_sp_with_pose(Human_augmented_temp[j][1:], Object_augmented_temp[j][1:], GT[GT_count - 1][5]).reshape(1, 64, 64, 3)
Pattern = np.concatenate((Pattern, Pattern_), axis=0)
Human_augmented.extend(Human_augmented_temp)
Object_augmented.extend(Object_augmented_temp)
action_HO.extend(action_HO_temp)
action_H.extend(action_H_temp)
num_pos = len(Human_augmented)
if image_id in Trainval_Neg:
if len(Trainval_Neg[image_id]) < Neg_select:
for Neg in Trainval_Neg[image_id]:
Human_augmented = np.concatenate((Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1,5)), axis=0)
Object_augmented = np.concatenate((Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1,5)), axis=0)
Pattern_ = Get_next_sp_with_pose(np.array(Neg[2], dtype='float64'), np.array(Neg[3], dtype='float64'), Neg[7]).reshape(1, 64, 64, 3)
Pattern = np.concatenate((Pattern, Pattern_), axis=0)
else:
List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
for i in range(Neg_select):
Neg = Trainval_Neg[image_id][List[i]]
Human_augmented = np.concatenate((Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1,5)), axis=0)
Object_augmented = np.concatenate((Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1,5)), axis=0)
Pattern_ = Get_next_sp_with_pose(np.array(Neg[2], dtype='float64'), np.array(Neg[3], dtype='float64'), Neg[7]).reshape(1, 64, 64, 3)
Pattern = np.concatenate((Pattern, Pattern_), axis=0)
num_pos_neg = len(Human_augmented)
mask_HO_ = np.asarray([1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,0,1,1,1,1,0,1]).reshape(1,29) # some actions do not have objects
mask_H_ = np.asarray([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]).reshape(1,29)
mask_HO = mask_HO_
mask_H = mask_H_
for i in range(num_pos - 1):
mask_H = np.concatenate((mask_H, mask_H_), axis=0)
for i in range(num_pos_neg - 1):
mask_HO = np.concatenate((mask_HO, mask_HO_), axis=0)
for i in range(num_pos_neg - num_pos):
action_HO = np.concatenate((action_HO, np.zeros(29).reshape(1,29)), axis=0)
Human_augmented = np.array(Human_augmented, dtype = 'float32')
Human_augmented_solo = np.array(Human_augmented_solo, dtype = 'float32')
Object_augmented = np.array(Object_augmented, dtype = 'float32')
action_HO = np.array(action_HO, dtype = 'int32')
action_H = np.array(action_H, dtype = 'int32')
Pattern = Pattern.reshape( num_pos_neg, 64, 64, 3)
Human_augmented = Human_augmented.reshape( num_pos_neg, 5)
Human_augmented_solo = Human_augmented_solo.reshape( -1, 5)
Object_augmented = Object_augmented.reshape(num_pos_neg, 5)
action_HO = action_HO.reshape(num_pos_neg, 29)
action_H = action_H.reshape( num_pos, 29)
mask_HO = mask_HO.reshape( num_pos_neg, 29)
mask_H = mask_H.reshape( num_pos, 29)
    # Binary label for V-COCO: unlike HICO, V-COCO has no explicit no-interaction class,
    # so assign the binary label directly from whether the sample is a positive or a negative.
binary_label = np.zeros((num_pos_neg, 2), dtype = 'int32')
for i in range(num_pos):
binary_label[i][0] = 1 # pos is at 0
for i in range(num_pos, num_pos_neg):
binary_label[i][1] = 1 # neg is at 1
return Pattern, Human_augmented, Human_augmented_solo, Object_augmented, action_HO, action_H, mask_HO, mask_H, binary_label
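# Returned shapes: Pattern (num_pos_neg, 64, 64, 3); H and O boxes (num_pos_neg, 5);
# action_HO and mask_HO (num_pos_neg, 29); action_H and mask_H (num_pos, 29);
# binary_label (num_pos_neg, 2) with positives marked in column 0 and negatives in column 1.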
def Get_Next_Instance_HO_spNeg_pose_pattern_version2(trainval_GT, Trainval_Neg, iter, Pos_augment, Neg_select, Data_length):
GT = trainval_GT[iter%Data_length]
image_id = GT[0][0]
im_file = config.cfg.DATA_DIR + '/' + 'v-coco/coco/images/train2014/COCO_train2014_' + (str(image_id)).zfill(12) + '.jpg'
im = cv2.imread(im_file)
im_orig = im.astype(np.float32, copy=True)
im_orig -= config.cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
Pattern, Human_augmented_sp, Human_augmented, Object_augmented, action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, binary_label = Augmented_HO_spNeg_pose_pattern_version2(GT, Trainval_Neg, im_shape, Pos_augment, Neg_select)
blobs = {}
blobs['image'] = im_orig
blobs['H_boxes'] = Human_augmented
blobs['Hsp_boxes'] = Human_augmented_sp
blobs['O_boxes'] = Object_augmented
blobs['gt_class_sp'] = action_sp
blobs['gt_class_HO'] = action_HO
blobs['gt_class_H'] = action_H
blobs['Mask_sp'] = mask_sp
blobs['Mask_HO'] = mask_HO
blobs['Mask_H'] = mask_H
blobs['sp'] = Pattern
blobs['H_num'] = len(action_H)
blobs['binary_label'] = binary_label
return blobs
def Augmented_HO_spNeg_pose_pattern_version2(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
image_id = GT[0][0]
GT_count = len(GT)
aug_all = int(Pos_augment / GT_count)
aug_last = Pos_augment - aug_all * (GT_count - 1)
Human_augmented, Object_augmented, action_HO, action_H = [], [], [], []
Pattern = np.empty((0, 64, 64, 3), dtype=np.float32)
for i in range(GT_count - 1):
Human = GT[i][2]
Object = GT[i][3]
Human_augmented_temp = Augmented_box(Human, shape, image_id, aug_all)
Object_augmented_temp = Augmented_box(Object, shape, image_id, aug_all)
length_min = min(len(Human_augmented_temp),len(Object_augmented_temp))
Human_augmented_temp = Human_augmented_temp[:length_min]
Object_augmented_temp = Object_augmented_temp[:length_min]
action_H__temp = Generate_action(GT[i][4])
action_H_temp = action_H__temp
for j in range(length_min - 1):
action_H_temp = np.concatenate((action_H_temp, action_H__temp), axis=0)
action_HO__temp = Generate_action(GT[i][1])
action_HO_temp = action_HO__temp
for j in range(length_min - 1):
action_HO_temp = np.concatenate((action_HO_temp, action_HO__temp), axis=0)
for j in range(length_min):
Pattern_ = Get_next_sp_with_pose(Human_augmented_temp[j][1:], Object_augmented_temp[j][1:], GT[i][5]).reshape(1, 64, 64, 3)
Pattern = np.concatenate((Pattern, Pattern_), axis=0)
Human_augmented.extend(Human_augmented_temp)
Object_augmented.extend(Object_augmented_temp)
action_HO.extend(action_HO_temp)
action_H.extend(action_H_temp)
Human = GT[GT_count - 1][2]
Object = GT[GT_count - 1][3]
Human_augmented_temp = Augmented_box(Human, shape, image_id, aug_last)
Object_augmented_temp = Augmented_box(Object, shape, image_id, aug_last)
length_min = min(len(Human_augmented_temp),len(Object_augmented_temp))
Human_augmented_temp = Human_augmented_temp[:length_min]
Object_augmented_temp = Object_augmented_temp[:length_min]
action_H__temp = Generate_action(GT[GT_count - 1][4])
action_H_temp = action_H__temp
for j in range(length_min - 1):
action_H_temp = np.concatenate((action_H_temp, action_H__temp), axis=0)
action_HO__temp = Generate_action(GT[GT_count - 1][1])
action_HO_temp = action_HO__temp
for j in range(length_min - 1):
action_HO_temp = np.concatenate((action_HO_temp, action_HO__temp), axis=0)
for j in range(length_min):
Pattern_ = Get_next_sp_with_pose(Human_augmented_temp[j][1:], Object_augmented_temp[j][1:], GT[GT_count - 1][5]).reshape(1, 64, 64, 3)
Pattern = np.concatenate((Pattern, Pattern_), axis=0)
Human_augmented.extend(Human_augmented_temp)
Object_augmented.extend(Object_augmented_temp)
action_HO.extend(action_HO_temp)
action_H.extend(action_H_temp)
action_sp = np.array(action_HO).copy()
num_pos = len(Human_augmented)
if image_id in Trainval_Neg:
if len(Trainval_Neg[image_id]) < Neg_select:
for Neg in Trainval_Neg[image_id]:
Human_augmented = np.concatenate((Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1,5)), axis=0)
Object_augmented = np.concatenate((Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1,5)), axis=0)
Pattern_ = Get_next_sp_with_pose(np.array(Neg[2], dtype='float64'), np.array(Neg[3], dtype='float64'), Neg[7]).reshape(1, 64, 64, 3)
Pattern = np.concatenate((Pattern, Pattern_), axis=0)
else:
List = random.sample(range(len(Trainval_Neg[image_id])), len(Trainval_Neg[image_id]))
for i in range(Neg_select):
Neg = Trainval_Neg[image_id][List[i]]
Human_augmented = np.concatenate((Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1,5)), axis=0)
Object_augmented = np.concatenate((Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1,5)), axis=0)
Pattern_ = Get_next_sp_with_pose(np.array(Neg[2], dtype='float64'), np.array(Neg[3], dtype='float64'), Neg[7]).reshape(1, 64, 64, 3)
Pattern = np.concatenate((Pattern, Pattern_), axis=0)
num_pos_neg = len(Human_augmented)
mask_sp_ = np.asarray([1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,0,1,1,1,1,0,1]).reshape(1,29)
mask_HO_ = np.asarray([1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,0,1,1,1,1,0,1]).reshape(1,29)
mask_H_ = np.asarray([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]).reshape(1,29)
mask_sp = mask_sp_
mask_HO = mask_HO_
mask_H = mask_H_
for i in range(num_pos - 1):
mask_HO = np.concatenate((mask_HO, mask_HO_), axis=0)
mask_H = np.concatenate((mask_H, mask_H_), axis=0)
for i in range(num_pos_neg - 1):
mask_sp = np.concatenate((mask_sp, mask_sp_), axis=0)
for i in range(num_pos_neg - num_pos):
        action_sp = np.concatenate((action_sp, np.zeros(29).reshape(1,29)), axis=0)  # negatives get an all-zero 29-dim action label
Human_augmented = np.array(Human_augmented, dtype = 'float32')
Object_augmented = np.array(Object_augmented, dtype = 'float32')
action_HO = np.array(action_HO, dtype = 'int32')
action_sp = np.array(action_sp, dtype = 'int32')
action_H = np.array(action_H, dtype = 'int32')
Pattern = Pattern.reshape( num_pos_neg, 64, 64, 3)
Human_augmented_sp= Human_augmented.reshape( num_pos_neg, 5)
Human_augmented = Human_augmented[:num_pos].reshape( num_pos, 5)
Object_augmented = Object_augmented[:num_pos].reshape(num_pos, 5)
action_sp = action_sp.reshape(num_pos_neg, 29)
action_HO = action_HO.reshape(num_pos, 29)
action_H = action_H.reshape( num_pos, 29)
mask_sp = mask_sp.reshape(num_pos_neg, 29)
mask_HO = mask_HO.reshape( num_pos, 29)
mask_H = mask_H.reshape( num_pos, 29)
binary_label = np.zeros((num_pos_neg, 2), dtype = 'int32')
for i in range(num_pos):
binary_label[i][0] = 1 # pos is at 0
for i in range(num_pos, num_pos_neg):
binary_label[i][1] = 1 # neg is at 1
return Pattern, Human_augmented_sp, Human_augmented, Object_augmented, action_sp, action_HO, action_H, mask_sp, mask_HO, mask_H, binary_label
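# Unlike the version above, only the spatial stream (Human_augmented_sp, action_sp, mask_sp)
# keeps the negative samples here; the H, O and HO streams are truncated to the positives.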
##############################################################################
# for hico with pose map
##############################################################################
def Get_Next_Instance_HO_Neg_HICO_pose_pattern_version2(trainval_GT, Trainval_Neg, iter, Pos_augment, Neg_select, Data_length):
GT = trainval_GT[iter%Data_length]
image_id = GT[0][0]
im_file = config.cfg.DATA_DIR + '/' + 'hico_20160224_det/images/train2015/HICO_train2015_' + (str(image_id)).zfill(8) + '.jpg'
im = cv2.imread(im_file)
im_orig = im.astype(np.float32, copy=True)
im_orig -= config.cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_orig = im_orig.reshape(1, im_shape[0], im_shape[1], 3)
Pattern, Human_augmented, Object_augmented, action_HO, num_pos, binary_label = Augmented_HO_Neg_HICO_pose_pattern_version2(GT, Trainval_Neg, im_shape, Pos_augment, Neg_select)
blobs = {}
blobs['image'] = im_orig
blobs['H_boxes'] = Human_augmented
blobs['O_boxes'] = Object_augmented
blobs['gt_class_HO'] = action_HO
blobs['sp'] = Pattern
blobs['H_num'] = num_pos
blobs['binary_label'] = binary_label
return blobs
def Augmented_HO_Neg_HICO_pose_pattern_version2(GT, Trainval_Neg, shape, Pos_augment, Neg_select):
GT_count = len(GT)
aug_all = int(Pos_augment / GT_count)
aug_last = Pos_augment - aug_all * (GT_count - 1)
image_id = GT[0][0]
Human_augmented, Object_augmented, action_HO = [], [], []
Pattern = np.empty((0, 64, 64, 3), dtype=np.float32)
for i in range(GT_count - 1):
Human = GT[i][2]
Object = GT[i][3]
Human_augmented_temp = Augmented_box(Human, shape, image_id, aug_all)
Object_augmented_temp = Augmented_box(Object, shape, image_id, aug_all)
length_min = min(len(Human_augmented_temp),len(Object_augmented_temp))
Human_augmented_temp = Human_augmented_temp[:length_min]
Object_augmented_temp = Object_augmented_temp[:length_min]
action_HO__temp = Generate_action_HICO(GT[i][1])
action_HO_temp = action_HO__temp
for j in range(length_min - 1):
action_HO_temp = np.concatenate((action_HO_temp, action_HO__temp), axis=0)
for j in range(length_min):
Pattern_ = Get_next_sp_with_pose(Human_augmented_temp[j][1:], Object_augmented_temp[j][1:], GT[i][5]).reshape(1, 64, 64, 3)
Pattern = np.concatenate((Pattern, Pattern_), axis=0)
Human_augmented.extend(Human_augmented_temp)
Object_augmented.extend(Object_augmented_temp)
action_HO.extend(action_HO_temp)
Human = GT[GT_count - 1][2]
Object = GT[GT_count - 1][3]
Human_augmented_temp = Augmented_box(Human, shape, image_id, aug_last)
Object_augmented_temp = Augmented_box(Object, shape, image_id, aug_last)
length_min = min(len(Human_augmented_temp),len(Object_augmented_temp))
Human_augmented_temp = Human_augmented_temp[:length_min]
Object_augmented_temp = Object_augmented_temp[:length_min]
action_HO__temp = Generate_action_HICO(GT[GT_count - 1][1])
action_HO_temp = action_HO__temp
for j in range(length_min - 1):
action_HO_temp = np.concatenate((action_HO_temp, action_HO__temp), axis=0)
for j in range(length_min):
Pattern_ = Get_next_sp_with_pose(Human_augmented_temp[j][1:], Object_augmented_temp[j][1:], GT[GT_count - 1][5]).reshape(1, 64, 64, 3)
Pattern = np.concatenate((Pattern, Pattern_), axis=0)
Human_augmented.extend(Human_augmented_temp)
Object_augmented.extend(Object_augmented_temp)
action_HO.extend(action_HO_temp)
num_pos = len(Human_augmented)
if image_id in Trainval_Neg:
if len(Trainval_Neg[image_id]) < Neg_select:
for Neg in Trainval_Neg[image_id]:
Human_augmented = np.concatenate((Human_augmented, np.array([0, Neg[2][0], Neg[2][1], Neg[2][2], Neg[2][3]]).reshape(1,5)), axis=0)
Object_augmented = np.concatenate((Object_augmented, np.array([0, Neg[3][0], Neg[3][1], Neg[3][2], Neg[3][3]]).reshape(1,5)), axis=0)
action_HO = np.concatenate((action_HO, Generate_action_HICO([Neg[1]])), axis=0)
                Pattern_ = Get_next_sp_with_pose(np.array(Neg[2], dtype='float64'), np.array(Neg[3], dtype='float64'), Neg[7]).reshape(1, 64, 64, 3)
import os
from math import floor, ceil
from pprint import pprint
import csv
import argparse
import simuran
import pandas as pd
import matplotlib.pyplot as plt
import astropy.units as u
from neurochat.nc_lfp import NLfp
import numpy as np
from scipy.signal import coherence
from skm_pyutils.py_table import list_to_df, df_from_file, df_to_file
from skm_pyutils.py_config import parse_args
import seaborn as sns
from scipy.signal import welch
try:
from lfp_atn_simuran.analysis.lfp_clean import LFPClean
do_analysis = True
except ImportError:
do_analysis = False
from neuronal.decoding import LFPDecoder
def decoding(lfp_array, groups, labels, base_dir):
for group in ["Control", "Lesion (ATNx)"]:
correct_groups = groups == group
lfp_to_use = lfp_array[correct_groups, :]
labels_ = labels[correct_groups]
decoder = LFPDecoder(
labels=labels_,
mne_epochs=None,
features=lfp_to_use,
cv_params={"n_splits": 100},
)
out = decoder.decode()
print(decoder.decoding_accuracy(out[2], out[1]))
print("\n----------Cross Validation-------------")
decoder.cross_val_decode(shuffle=False)
pprint(decoder.cross_val_result)
pprint(decoder.confidence_interval_estimate("accuracy"))
print("\n----------Cross Validation Control (shuffled)-------------")
decoder.cross_val_decode(shuffle=True)
pprint(decoder.cross_val_result)
pprint(decoder.confidence_interval_estimate("accuracy"))
random_search = decoder.hyper_param_search(verbose=True, set_params=False)
print("Best params:", random_search.best_params_)
decoder.visualise_features(output_folder=base_dir, name=f"_{group}")
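# decoding() fits a cross-validated classifier per experimental group (Control vs ATNx lesion)
# on the per-trial LFP feature rows, reports accuracy alongside a shuffled-label control,
# runs a hyper-parameter search, and writes feature visualisations into base_dir.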
def main(
excel_location,
base_dir,
plot_individual_sessions,
do_coherence=True,
do_decoding=True,
overwrite=False,
):
# Setup
df = df_from_file(excel_location)
cfg = simuran.parse_config()
delta_min = cfg["delta_min"]
delta_max = cfg["delta_max"]
theta_min = cfg["theta_min"]
theta_max = cfg["theta_max"]
clean_method = cfg["clean_method"]
clean_kwargs = cfg["clean_kwargs"]
window_sec = 0.5
fmin, fmax = 2.0, 40
max_lfp_lengths_seconds = {"start": 20, "choice": (3.5, 0.5), "end": 10}
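    # Maximum LFP length (in seconds) analysed per trial phase; for "choice" the tuple is
    # presumably (seconds before, seconds after) the choice time.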
ituples = df.itertuples()
num_rows = len(df)
no_pass = False
if "passed" not in df.columns:
print("Please add passed as a column to the df.")
no_pass = True
results = []
coherence_df_list = []
base_dir_new = os.path.dirname(excel_location)
here = os.path.dirname(os.path.abspath(__file__))
decoding_loc = out_name = os.path.join(
here, "..", "sim_results", "tmaze", "lfp_decoding.csv"
)
lfp_len = 6
hf = lfp_len // 2
new_lfp = np.zeros(shape=(num_rows // 2, lfp_len))
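    # decoding feature matrix: one row per session, holding hf theta-band coherence values
    # (taken from the "choice" part of the recording) for the forced trial, followed by hf
    # values for the choice trial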
groups = []
choices = []
pxx_arr = []
oname_coherence = os.path.join(
here, "..", "sim_results", "tmaze", "coherence_full.csv"
)
oname_power_tmaze = os.path.join(
here, "..", "sim_results", "tmaze", "power_tmaze_full.csv"
)
split = os.path.splitext(os.path.basename(excel_location))
o_name_res = os.path.join(
here, "..", "sim_results", "tmaze", split[0] + "_results" + split[1]
)
# Load existing data if instructed to and it exists
os.makedirs(os.path.dirname(oname_coherence), exist_ok=True)
skip = (
(os.path.exists(decoding_loc))
and (not overwrite)
and (os.path.exists(oname_coherence))
)
if skip:
with open(decoding_loc, "r") as f:
csvreader = csv.reader(f, delimiter=",")
for i, row in enumerate(csvreader):
groups.append(row[0])
choices.append(row[1])
vals = row[2:]
new_lfp[i] = np.array([float(v) for v in vals[:lfp_len]])
coherence_df = df_from_file(oname_coherence)
power_df = df_from_file(oname_power_tmaze)
res_df = df_from_file(o_name_res)
## Extract LFP, do coherence, and plot
if not skip:
for j in range(num_rows // 2):
# row1 is the forced movement
row1 = next(ituples)
# row2 is the choice trial
row2 = next(ituples)
# Load the t-maze data
recording_location = os.path.join(base_dir, row1.location)
recording_location = recording_location.replace("--", os.sep)
param_file = os.path.join(here, "..", "recording_mappings", row1.mapping)
recording = simuran.Recording(
param_file=param_file, base_file=recording_location, load=False
)
lfp_clean = LFPClean(method=clean_method, visualise=False)
recording.signals.load()
sig_dict = lfp_clean.clean(
recording, min_f=fmin, max_f=fmax, method_kwargs=clean_kwargs
)["signals"]
x = np.array(sig_dict["SUB"].samples.to(u.mV))
duration = len(x) / 250
y = np.array(sig_dict["RSC"].samples.to(u.mV))
fig, ax = plt.subplots()
fs = sig_dict["SUB"].sampling_rate
# Setup and loading done -- Analyse the t-maze data
if do_coherence:
# Coherence over the whole recording
f, Cxy = coherence(x, y, fs, nperseg=window_sec * 250)
                # restrict to [fmin, fmax]; compute the mask once so f and Cxy stay aligned
                f_idx = np.nonzero((f >= fmin) & (f <= fmax))
                Cxy = Cxy[f_idx]
                f = f[f_idx]
theta_co = Cxy[np.nonzero((f >= theta_min) & (f <= theta_max))]
delta_co = Cxy[np.nonzero((f >= delta_min) & (f <= delta_max))]
max_theta_coherence_ = np.nanmean(theta_co)
max_delta_coherence_ = np.nanmean(delta_co)
if plot_individual_sessions:
# Used to plot t-maze sessions - mostly for verification
recording.spatial.load()
spatial = recording.spatial.underlying
fig, ax = plt.subplots()
for k_, r in enumerate(
(
row1,
row2,
)
):
if k_ == 0:
trial_type = "forced"
else:
trial_type = "choice"
# Parse out the times
t1, t2, t3 = r.start, r.choice, r.end
                # Make sure there are no parsing errors
if t3 > duration:
raise RuntimeError(
"Last time {} greater than duration {}".format(t3, duration)
)
# Convert these times into LFP samples
lfpt1, lfpt2, lfpt3 = (
int(floor(t1 * fs)),
int(ceil(t2 * fs)),
int(ceil(t3 * fs)),
)
# Split the LFP into three parts, the start, choice, and end
lfp_portions = {}
time_dict = {
"start": (lfpt1, lfpt2, lfpt2),
"choice": (lfpt1, lfpt2, lfpt3),
"end": (lfpt2, lfpt3, lfpt3),
}
for k, v in max_lfp_lengths_seconds.items():
max_len = v
start_time = time_dict[k][0]
choice_time = time_dict[k][1]
end_time = time_dict[k][2]
if k == "start":
# If the start bit is longer than max_len, take the last X
# seconds before the choice data
ct = max_lfp_lengths_seconds["choice"][0]
end_time = max(end_time - int(floor(ct * fs)), start_time)
natural_start_time = end_time - max_len * fs
start_time = max(natural_start_time, start_time)
elif k == "choice":
# For the choice, take (max_len[0], max_len[1]) seconds
# of data around the point
left_push = int(floor(v[0] * fs))
right_push = int(ceil(v[1] * fs))
start_time = max(choice_time - left_push, start_time)
end_time = min(choice_time + right_push, end_time)
elif k == "end":
# For the end time, if the end is longer than max_len, take the first X seconds after the choice data
ct = max_lfp_lengths_seconds["choice"][1]
start_time = min(start_time + int(ceil(ct * fs)), end_time)
natural_end_time = start_time + max_len * fs
end_time = min(natural_end_time, end_time)
else:
raise RuntimeError(f"Unsupported key {k}")
# Make sure have at least 1 second
if (end_time - start_time) < fs:
end_time = start_time + fs
if end_time > int(ceil(duration * 250)):
raise RuntimeError(
"End time {} greater than duration {}".format(
end_time, duration
)
)
lfp_portions[k] = (start_time, end_time)
if j % 20 == 0:
print(f"On iteration {j} of {num_rows // 2} -- trial {trial_type}")
for k in lfp_portions.keys():
print(
"{}: {} -- {}".format(
k,
np.array(time_dict[k]) / 250,
np.array(lfp_portions[k]) / 250,
)
)
print("----------------------")
if plot_individual_sessions:
if r.test == "first":
c = "k"
else:
c = "r"
st1, st2 = int(floor(t1 * 50)), int(ceil(t3 * 50))
x_time = spatial.get_pos_x()[st1:st2]
y_time = spatial.get_pos_y()[st1:st2]
c_end = int(floor(t2 * 50))
spat_c = (spatial.get_pos_x()[c_end], spatial.get_pos_y()[c_end])
ax.plot(x_time, y_time, c=c, label=r.test)
ax.plot(spat_c[0], spat_c[1], c="b", marker="x", label="decision")
ax.plot(x_time[0], y_time[0], c="b", marker="o", label="start")
ax.plot(x_time[-1], y_time[-1], c="b", marker=".", label="end")
if do_coherence:
res_dict = {}
# Power
for k, v in lfp_portions.items():
for region, signal in sig_dict.items():
lfpt1, lfpt2 = v
lfp = NLfp()
lfp.set_channel_id(signal.channel)
lfp._timestamp = np.array(
signal.timestamps[lfpt1:lfpt2].to(u.s)
)
lfp._samples = np.array(
signal.samples[lfpt1:lfpt2].to(u.mV)
)
lfp._record_info["Sampling rate"] = signal.sampling_rate
delta_power = lfp.bandpower(
[delta_min, delta_max],
window_sec=window_sec,
band_total=True,
)
theta_power = lfp.bandpower(
[theta_min, theta_max],
window_sec=window_sec,
band_total=True,
)
res_dict["{}-{}_delta".format(region, k)] = delta_power[
"relative_power"
]
res_dict["{}-{}_theta".format(region, k)] = theta_power[
"relative_power"
]
sub_s = sig_dict["SUB"]
rsc_s = sig_dict["RSC"]
x = np.array(sub_s.samples[lfpt1:lfpt2].to(u.mV))
y = np.array(rsc_s.samples[lfpt1:lfpt2].to(u.mV))
f, Cxy = coherence(x, y, fs, nperseg=window_sec * 250, nfft=256)
                        # restrict to [fmin, fmax]; compute the mask once so f and Cxy stay aligned
                        f_idx = np.nonzero((f >= fmin) & (f <= fmax))
                        Cxy = Cxy[f_idx]
                        f = f[f_idx]
if do_decoding:
if k == "choice":
coherence_vals_for_decode = Cxy[
np.nonzero((f >= theta_min) & (f <= theta_max))
]
s, e = (k_) * hf, (k_ + 1) * hf
new_lfp[j, s:e] = coherence_vals_for_decode
theta_co = Cxy[np.nonzero((f >= theta_min) & (f <= theta_max))]
delta_co = Cxy[np.nonzero((f >= delta_min) & (f <= delta_max))]
max_theta_coherence = np.nanmean(theta_co)
max_delta_coherence = np.nanmean(delta_co)
theta_co_peak = Cxy[np.nonzero((f >= 11.0) & (f <= 13.0))]
peak_theta_coherence = np.nanmax(theta_co_peak)
if trial_type == "forced":
final_trial_type = "Forced"
else:
if r.passed.strip().upper() == "Y":
final_trial_type = "Correct"
elif r.passed.strip().upper() == "N":
final_trial_type = "Incorrect"
else:
final_trial_type = "ERROR IN ANALYSIS"
res_list = [
r.location,
r.session,
r.animal,
r.test,
r.passed.strip(),
k,
final_trial_type,
]
res_list += [
res_dict[f"SUB-{k}_delta"],
res_dict[f"SUB-{k}_theta"],
res_dict[f"RSC-{k}_delta"],
res_dict[f"RSC-{k}_theta"],
]
res_list += [max_theta_coherence, max_delta_coherence]
res_list += [
max_theta_coherence_,
max_delta_coherence_,
peak_theta_coherence,
]
if no_pass is False:
group = (
"Control"
if r.animal.lower().startswith("c")
else "Lesion (ATNx)"
)
if do_coherence:
for f_, cxy_ in zip(f, Cxy):
coherence_df_list.append(
(
f_,
cxy_,
r.passed.strip(),
group,
r.test,
r.session,
k,
final_trial_type,
)
)
f_welch, Pxx = welch(
x,
fs=fs,
nperseg=window_sec * 250,
return_onesided=True,
scaling="density",
average="mean",
)
                                # restrict to [fmin, fmax]; mask computed once so f_welch and Pxx stay aligned
                                f_idx_w = np.nonzero(
                                    (f_welch >= fmin) & (f_welch <= fmax)
                                )
                                f_welch = f_welch[f_idx_w]
                                Pxx = Pxx[f_idx_w]
# Convert to full scale relative dB (so max at 0)
Pxx_max = np.max(Pxx)
Pxx = 10 * np.log10(Pxx / Pxx_max)
for p_val, f_val in zip(Pxx, f_welch):
pxx_arr.append(
[
f_val,
p_val,
r.passed.strip(),
group,
k,
final_trial_type,
]
)
res_list += [group]
results.append(res_list)
name = os.path.splitext(r.location)[0]
if plot_individual_sessions:
fig2, ax2 = plt.subplots(3, 1)
ax2[0].plot(f, Cxy, c="k")
ax2[1].plot([i / 250 for i in range(len(x))], x, c="k")
ax2[2].plot([i / 250 for i in range(len(y))], y, c="k")
base_dir_new = os.path.dirname(excel_location)
fig2.savefig(
os.path.join(
base_dir_new,
"coherence_{}_{}_{}.png".format(
row1.location, r.session, r.test
),
)
)
plt.close(fig2)
if do_decoding:
groups.append(group)
choices.append(str(r.passed).strip())
if plot_individual_sessions:
ax.invert_yaxis()
ax.legend()
base_dir_new = os.path.dirname(excel_location)
figname = os.path.join(base_dir_new, name) + "_tmaze.png"
fig.savefig(figname, dpi=400)
plt.close(fig)
if do_coherence and not skip:
# Save the results
headers = [
"location",
"session",
"animal",
"test",
"choice",
"part",
"trial",
"SUB_delta",
"SUB_theta",
"RSC_delta",
"RSC_theta",
"Theta_coherence",
"Delta_coherence",
"Full_theta_coherence",
"Full_delta_coherence",
"Peak 12Hz Theta coherence",
"Group",
]
res_df = pd.DataFrame(results, columns=headers)
split = os.path.splitext(os.path.basename(excel_location))
out_name = os.path.join(
here, "..", "sim_results", "tmaze", split[0] + "_results" + split[1]
)
df_to_file(res_df, out_name, index=False)
# Plot difference between pass and fail trials
headers = [
"Frequency (Hz)",
"Coherence",
"Passed",
"Group",
"Test",
"Session",
"Part",
"Trial",
]
coherence_df = list_to_df(coherence_df_list, headers=headers)
df_to_file(coherence_df, oname_coherence, index=False)
power_df = list_to_df(
pxx_arr,
headers=[
"Frequency (Hz)",
"Power (dB)",
"Passed",
"Group",
"Part",
"Trial",
],
)
df_to_file(power_df, oname_power_tmaze, index=False)
if do_coherence or skip:
simuran.set_plot_style()
# res_df["ID"] = res_df["trial"] + "_" + res_df["part"]
res_df = res_df[res_df["part"] == "choice"]
sns.barplot(
data=res_df,
x="trial",
y="Theta_coherence",
hue="Group",
estimator=np.median,
)
plt.xlabel("Trial result")
plt.ylabel("Theta coherence")
plt.tight_layout()
plt.savefig(
os.path.join(here, "..", "sim_results", "tmaze", f"bar--coherence.pdf"),
dpi=400,
)
plt.close("all")
sns.barplot(
data=res_df,
x="trial",
y="Delta_coherence",
hue="Group",
estimator=np.median,
)
plt.xlabel("Trial result")
plt.ylabel("Delta coherence")
plt.tight_layout()
plt.savefig(
os.path.join(
here, "..", "sim_results", "tmaze", f"bar--coherence--delta.pdf"
),
dpi=400,
)
plt.close("all")
for group in ("Control", "Lesion (ATNx)"):
coherence_df_sub = coherence_df[coherence_df["Group"] == group]
power_df_sub = power_df[power_df["Group"] == group]
sns.lineplot(
data=coherence_df_sub,
x="Frequency (Hz)",
y="Coherence",
hue="Part",
style="Trial",
ci=None,
estimator=np.median,
)
plt.ylim(0, 1)
simuran.despine()
plt.savefig(
os.path.join(
here, "..", "sim_results", "tmaze", f"{group}--coherence.pdf"
),
dpi=400,
)
plt.close("all")
sns.lineplot(
data=coherence_df_sub,
x="Frequency (Hz)",
y="Coherence",
hue="Part",
style="Trial",
ci=95,
estimator=np.median,
)
plt.ylim(0, 1)
simuran.despine()
plt.savefig(
os.path.join(
here, "..", "sim_results", "tmaze", f"{group}--coherence_ci.pdf"
),
dpi=400,
)
plt.close("all")
sns.lineplot(
data=power_df_sub,
x="Frequency (Hz)",
y="Power (dB)",
hue="Part",
style="Trial",
ci=95,
estimator=np.median,
)
plt.xlim(0, 40)
simuran.despine()
plt.savefig(
os.path.join(
here, "..", "sim_results", "tmaze", f"{group}--power_ci.pdf"
),
dpi=400,
)
plt.close("all")
    # Choice part - lesion vs control power/coherence for incorrect and correct trials
power_df["Trial result"] = power_df["Trial"]
power_df_sub_bit = power_df[
(power_df["Part"] == "choice") & (power_df["Trial"] != "Forced")
]
sns.lineplot(
data=power_df_sub_bit,
x="Frequency (Hz)",
y="Power (dB)",
hue="Group",
style="Trial result",
estimator=np.median,
)
simuran.despine()
plt.savefig(
os.path.join(here, "..", "sim_results", "tmaze", "choice_power_ci.pdf"),
dpi=400,
)
plt.close("all")
coherence_df["Trial result"] = coherence_df["Trial"]
coherence_df_sub_bit = coherence_df[
(coherence_df["Part"] == "choice") & (coherence_df["Trial"] != "Forced")
]
sns.lineplot(
data=coherence_df_sub_bit,
x="Frequency (Hz)",
y="Coherence",
hue="Group",
style="Trial result",
ci=95,
estimator=np.median,
)
plt.ylim(0, 1)
simuran.despine()
plt.savefig(
os.path.join(here, "..", "sim_results", "tmaze", "choice_coherence_ci.pdf"),
dpi=400,
)
plt.close("all")
# Try to decode pass and fail trials.
if not os.path.exists(decoding_loc) or overwrite:
with open(decoding_loc, "w") as f:
for i in range(len(groups)):
line = ""
line += f"{groups[i]},"
line += f"{choices[i]},"
for v in new_lfp[i]:
line += f"{v},"
line = line[:-1] + "\n"
f.write(line)
if do_decoding:
        groups = np.array(groups)
import numpy as np
def softmax(x):
"""Stable softmax"""
x -= np.max(x, axis=0)
e_x = np.exp(x)
return e_x / np.sum(e_x, axis=0)
def get_idx_aug_baseline(LOO_influences):
"""Returns points randomly"""
idxs = np.random.choice(
len(LOO_influences),
len(LOO_influences),
p=None,
replace=False,
)
for idx in idxs:
yield [idx]
def get_idx_aug_influence(LOO_influences):
"""Returns points with probability proportional to magnitude of LOO"""
p = np.abs(LOO_influences, dtype=float)
p[p == 0] = min(np.min(p[p > 0]), 1e-20)
    p /= np.sum(p)
    # sample without replacement, weighted by |LOO influence| (mirrors the baseline sampler above)
    idxs = np.random.choice(
        len(LOO_influences),
        len(LOO_influences),
        p=p,
        replace=False,
    )
    for idx in idxs:
        yield [idx]
#!/usr/bin/env python
"""
Tests of sa_utilities.std_analysis
"""
import numpy
import storm_analysis
import storm_analysis.sa_library.parameters as params
import storm_analysis.sa_library.sa_h5py as saH5Py
import storm_analysis.sa_utilities.std_analysis as stdAnalysis
def test_std_analysis_1():
"""
Test zCheck.
"""
# Load 3D parameters.
settings = storm_analysis.getData("test/data/test_3d_3d.xml")
parameters = params.ParametersDAO().initFromFile(settings)
[min_z, max_z] = parameters.getZRange()
assert(abs(min_z + 0.5) < 1.0e-6)
assert(abs(max_z - 0.5) < 1.0e-6)
# Create HDF5 file with localizations and tracks.
zvals = numpy.arange(-1.0, 1.05, 0.2)
peaks = {"category" : numpy.ones(zvals.size, dtype = numpy.int32),
"x" : numpy.zeros(zvals.size),
"z" : zvals}
h5_name = storm_analysis.getPathOutputTest("test_sa_hdf5.hdf5")
storm_analysis.removeFile(h5_name)
with saH5Py.SAH5Py(h5_name, is_existing = False) as h5:
h5.setMovieInformation(256, 256, 10, "XYZZY")
h5.addLocalizations(peaks, 1)
h5.addTracks(peaks)
# Run z check on the file.
stdAnalysis.zCheck(h5_name, parameters)
# Check track and localization categories.
category = numpy.ones(zvals.size, dtype = numpy.int32)
z_mask = (zvals < min_z) | (zvals > max_z)
category[z_mask] = 9
with saH5Py.SAH5Py(h5_name) as h5:
for fnum, locs in h5.localizationsIterator(fields = ["category"]):
            assert(numpy.allclose(locs["category"], category))
import tensorflow as tf
import numpy as np
import numpy.linalg as nl
import utils.general
import skimage.feature
import json
import os
PAF_type = 0
allPAFConnection = [[np.array([[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 2], [2, 3], [3, 4], [2, 16], [1, 5], [5, 6], [6, 7], [5, 17], [1, 0], [0, 14], [0, 15], [14, 16], [15, 17], [1, 18], [1, 19], [19, 8], [19, 11]]),
np.array([[0, 4], [4, 3], [3, 2], [2, 1], [0, 8], [8, 7], [7, 6], [6, 5], [0, 12], [12, 11], [11, 10], [10, 9], [0, 16], [16, 15], [15, 14], [14, 13], [0, 20], [20, 19], [19, 18], [18, 17]])
], # PAF type 0 (Original Openpose)
[np.array([[1, 8], [8, 9], [9, 10], [1, 11], [11, 12], [12, 13], [1, 2], [2, 3], [3, 4], [2, 16], [1, 5], [5, 6], [6, 7], [5, 17],
[1, 0], [0, 14], [0, 15], [14, 16], [15, 17], [1, 18], [2, 4], [5, 7], [8, 4], [11, 7], [8, 10], [11, 13]]), # augmented PAF
np.array([[0, 4], [4, 3], [3, 2], [2, 1], [0, 8], [8, 7], [7, 6], [6, 5], [0, 12], [12, 11], [11, 10], [10, 9], [0, 16], [16, 15], [15, 14], [14, 13], [0, 20], [20, 19], [19, 18], [18, 17]])
]] # PAF type 1 (My augmented PAF)
PAFConnection = allPAFConnection[PAF_type]
dist_thresh = 8
if os.path.exists('utils/default_PAF_lengths.json'):
with open('utils/default_PAF_lengths.json', 'r') as f:
default_PAF_length = json.load(f)
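# default_PAF_length[objtype][ic] holds a canonical bone length for each PAF connection;
# it is used as a fallback depth step when the PAF response is missing or degenerate
# (see PAF_to_3D and recon_skeleton_PAF below).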
def getValidPAF(valid, objtype, PAFdim):
# input "valid": a tensor containing bool valid/invalid for each channel
# input "objtype": 0 for body, 1 for hand (to select PAFConnection)
with tf.variable_scope('getValidPAF'):
assert objtype in (0, 1)
connection = tf.constant(np.repeat(PAFConnection[objtype], PAFdim, axis=0), dtype=tf.int64)
batch_size = valid.get_shape().as_list()[0]
PAF_valid = []
for ib in range(batch_size):
b_valid = valid[ib, :]
assert len(b_valid.get_shape().as_list()) == 1
indexed_valid = tf.gather(b_valid, connection, axis=0)
PAF_valid.append(tf.logical_and(indexed_valid[:, 0], indexed_valid[:, 1]))
PAF_valid = tf.stack(PAF_valid, axis=0)
return PAF_valid
def getValidPAFNumpy(valid, objtype):
# used in testing time
# input "valid": a numpy array containing bool valid/invalid for each channel
# input "objtype": 0 for body, 1 for hand (to select PAFConnection)
assert objtype in (0, 1)
connection = PAFConnection[objtype]
PAF_valid = []
for conn in connection:
connection_valid = valid[conn[0]] and valid[conn[1]]
PAF_valid.append(connection_valid)
PAF_valid = np.array(PAF_valid, dtype=bool)
return PAF_valid
def createPAF(keypoint2d, keypoint3d, objtype, output_size, normalize_3d=True, valid_vec=None):
# objtype: 0: body, 1: hand
# output_size: (h, w)
# keypoint2d: (x, y)
# normalize_3d: if True: set x^2 + y^2 + z^2 = 1; else set x^2 + y^2 = 1
with tf.variable_scope('createPAF'):
assert keypoint2d.get_shape().as_list()[0] == keypoint3d.get_shape().as_list()[0]
assert keypoint2d.get_shape().as_list()[1] == 2
assert keypoint3d.get_shape().as_list()[1] == 3
if valid_vec is None:
valid_vec = tf.ones([keypoint2d.get_shape()[0]], dtype=tf.bool)
h_range = tf.expand_dims(tf.range(output_size[0]), 1)
w_range = tf.expand_dims(tf.range(output_size[1]), 0)
H = tf.cast(tf.tile(h_range, [1, output_size[1]]), tf.float32)
W = tf.cast(tf.tile(w_range, [output_size[0], 1]), tf.float32)
PAFs = []
for ic, conn in enumerate(PAFConnection[objtype]):
AB = keypoint2d[conn[1]] - keypoint2d[conn[0]] # joint 0 - > joint 1
l_AB = tf.sqrt(tf.reduce_sum(tf.square(AB)))
AB = AB / l_AB
dx = W - keypoint2d[conn[0], 0]
dy = H - keypoint2d[conn[0], 1]
dist = tf.abs(dy * AB[0] - dx * AB[1]) # cross product
Xmin = tf.minimum(keypoint2d[conn[0], 0], keypoint2d[conn[1], 0]) - dist_thresh
Xmax = tf.maximum(keypoint2d[conn[0], 0], keypoint2d[conn[1], 0]) + dist_thresh
Ymin = tf.minimum(keypoint2d[conn[0], 1], keypoint2d[conn[1], 1]) - dist_thresh
Ymax = tf.maximum(keypoint2d[conn[0], 1], keypoint2d[conn[1], 1]) + dist_thresh
within_range = tf.cast(W >= Xmin, tf.float32) * tf.cast(W <= Xmax, tf.float32) * tf.cast(H >= Ymin, tf.float32) * tf.cast(H <= Ymax, tf.float32)
within_dist = tf.cast(dist < dist_thresh, tf.float32)
mask = within_range * within_dist
AB3d = (keypoint3d[conn[1]] - keypoint3d[conn[0]])
if normalize_3d:
scale = tf.sqrt(tf.reduce_sum(tf.square(AB3d)))
else:
scale = tf.sqrt(tf.reduce_sum(tf.square(AB3d[:2])))
AB3d /= scale
AB3d = tf.where(tf.is_nan(AB3d), tf.zeros([3], dtype=tf.float32), AB3d)
cond_valid = tf.logical_and(valid_vec[conn[0]], valid_vec[conn[1]])
connPAF = tf.cond(cond_valid, lambda: tf.tile(tf.expand_dims(mask, 2), [1, 1, 3]) * AB3d, lambda: tf.zeros((output_size[0], output_size[1], 3), dtype=tf.float32))
# create the PAF only when both joints are valid
PAFs.append(connPAF)
concat_PAFs = tf.concat(PAFs, axis=2)
return concat_PAFs
def getColorAffinity(v):
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
summed = RY + YG + GC + CB + BM + MR
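    # piecewise-linear traversal of the OpenPose colour wheel
    # (red -> yellow -> green -> cyan -> blue -> magenta -> red) over RY+YG+GC+CB+BM+MR bins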
v = min(max(v, 0.0), 1.0) * summed
if v < RY:
c = (255., 255. * (v / (RY)), 0.)
elif v < RY + YG:
c = (255. * (1 - ((v - RY) / (YG))), 255., 0.)
elif v < RY + YG + GC:
c = (0. * (1 - ((v - RY) / (YG))), 255., 255. * ((v - RY - YG) / (GC)))
elif v < RY + YG + GC + CB:
c = (0., 255. * (1 - ((v - RY - YG - GC) / (CB))), 255.)
elif v < summed - MR:
c = (255. * ((v - RY - YG - GC - CB) / (BM)), 0., 255.)
elif v < summed:
c = (255., 0., 255. * (1 - ((v - RY - YG - GC - CB - BM) / (MR))))
else:
c = (255., 0., 0.)
return np.array(c)
def plot_PAF(PAF_array):
# return a 3-channel uint8 np array
assert len(PAF_array.shape) == 3
assert PAF_array.shape[2] == 2 or PAF_array.shape[2] == 3
out = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3), dtype=np.uint8)
# 2D PAF: use Openpose Visualization
x = PAF_array[:, :, 0]
y = PAF_array[:, :, 1]
rad = np.sqrt(np.square(x) + np.square(y))
rad = np.minimum(rad, 1.0)
a = np.arctan2(-y, -x) / np.pi
fk = (a + 1.) / 2.
for i in range(PAF_array.shape[0]):
for j in range(PAF_array.shape[1]):
color = getColorAffinity(fk[i, j]) * rad[i, j]
out[i, j, :] = color
if PAF_array.shape[2] == 3:
# also return the average z value (for judge pointing out / in)
# total_rad = np.sqrt(np.sum(np.square(PAF_array), axis=2))
# rz = PAF_array[:, :, 2] / total_rad
# rz[np.isnan(rz)] = 0.0
# rz[total_rad < 0.5] = 0.0
# z_map = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3))
# z_map[:, :, 0] = 255 * rz * (rz > 0)
# z_map[:, :, 1] = 255 * (-rz) * (rz < 0)
rz = PAF_array[:, :, 2]
z_map = np.zeros((PAF_array.shape[0], PAF_array.shape[1], 3))
z_map[:, :, 0] = 255 * rz * (rz > 0)
z_map[:, :, 1] = 255 * (-rz) * (rz < 0)
z_map = np.maximum(np.minimum(z_map, 255), 0)
return out, z_map.astype(np.uint8)
return out
def plot_all_PAF(PAF_array, PAFdim):
assert PAFdim in (2, 3)
if PAFdim == 2:
assert PAF_array.shape[2] % 2 == 0
total_PAF_x = np.sum(PAF_array[:, :, ::2], axis=2)
total_PAF_y = np.sum(PAF_array[:, :, 1::2], axis=2)
total_PAF = np.stack([total_PAF_x, total_PAF_y], axis=2)
return plot_PAF(total_PAF)
else:
assert PAFdim == 3 and PAF_array.shape[2] % 3 == 0
total_PAF_x = np.sum(PAF_array[:, :, ::3], axis=2)
total_PAF_y = np.sum(PAF_array[:, :, 1::3], axis=2)
total_PAF_z = np.sum(PAF_array[:, :, 2::3], axis=2)
total_PAF = np.stack([total_PAF_x, total_PAF_y, total_PAF_z], axis=2)
return plot_PAF(total_PAF)
def PAF_to_3D(coord2d, PAF, objtype=0):
if objtype == 0:
depth_root_idx = 1 # put neck at 0-depth
else:
assert objtype == 1
depth_root_idx = 0
assert len(coord2d.shape) == 2 and coord2d.shape[1] == 2
coord3d = np.zeros((coord2d.shape[0], 3), dtype=coord2d.dtype)
coord3d[:, :2] = coord2d
coord3d[depth_root_idx, 2] = 0.0
vec3d_array = []
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0:
if ic in (9, 13):
continue
elif PAF_type == 1:
if ic in (9, 13) or ic >= 20:
continue
A = coord2d[conn[0]]
B = coord2d[conn[1]]
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int) # 2 * N
vec3ds = PAF[points[1], points[0], 3 * ic:3 * ic + 3] # note order of y, x in index
vec3d = np.mean(vec3ds, axis=0)
vec3d[np.isnan(vec3d)] = 0.0 # numerical stability
if (A == B).all(): # A and B actually coincides with each other, put the default bone length.
coord3d[conn[1], 0] = A[0]
coord3d[conn[1], 1] = A[1]
if vec3d[2] >= 0:
coord3d[conn[1], 2] = coord3d[conn[0], 2] + default_PAF_length[objtype][ic]
else:
coord3d[conn[1], 2] = coord3d[conn[0], 2] - default_PAF_length[objtype][ic]
else:
# find the least square solution of Ax = b
A = np.zeros([3, 2])
A[2, 0] = -1.
A[:, 1] = vec3d
b = coord3d[conn[1]] - coord3d[conn[0]] # by this time the z-value of target joint should be 0
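                # x[0] is the depth (z) of the child joint and x[1] the bone length along the
                # unit PAF direction; the first two rows of A enforce consistency with the
                # observed 2D displacement, the last row ties x[0] to the parent depth.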
x, _, _, _ = nl.lstsq(A, b, rcond=-1)
if x[1] < 0: # the direction is reversed
if vec3d[2] >= 0:
coord3d[conn[1], 2] = coord3d[conn[0], 2] + default_PAF_length[objtype][ic] # assume that this connection is vertical to the screen
else:
coord3d[conn[1], 2] = coord3d[conn[0], 2] - default_PAF_length[objtype][ic]
else:
coord3d[conn[1], 2] = x[0]
if nl.norm(vec3d) < 0.1 or x[1] < 0: # If there is almost no response, or the direction is reversed, put it zero so that Adam does not fit.
vec3d[:] = 0
vec3d_array.append(vec3d)
return coord3d, np.array(vec3d_array)
def collect_PAF_vec(coord2d, PAF, objtype=0):
assert len(coord2d.shape) == 2 and coord2d.shape[1] == 2
assert len(PAF.shape) == 3 # H, W, C
vec3d_array = []
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0 and ic in (9, 13):
continue
elif PAF_type == 1 and ic in (9, 13): # need the extra PAFs here
continue
A = coord2d[conn[0]]
B = coord2d[conn[1]]
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int) # 2 * N
if 3 * ic < PAF.shape[2]: # to be compatible with old network with only 20 PAFs instead of 23
vec3ds = PAF[points[1], points[0], 3 * ic:3 * ic + 3] # note order of y, x in index
vec3d = np.mean(vec3ds, axis=0)
else:
vec3d = np.zeros((3,))
vec3d[np.isnan(vec3d)] = 0.0 # numerical stability
vec3d_array.append(vec3d)
return np.array(vec3d_array)
def recon_skeleton_PAF(vec3ds, objtype=0):
# reconstruct a skeleton with standard bone length from PAF only
selected_PAF_array = []
if objtype == 0:
coord3d_pred_v = np.zeros([19, 3], dtype=vec3ds.dtype)
root_idx = 1
else:
assert objtype == 1
coord3d_pred_v = np.zeros([21, 3], dtype=vec3ds.dtype)
root_idx = 0
coord3d_pred_v[root_idx] = 0.0
count_vec = 0
for ic, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0 and (ic in (9, 13) or ic >= 21):
continue
elif PAF_type == 1 and ic in (9, 13):
continue
vec = vec3ds[count_vec]
vlength = nl.norm(vec)
assert vlength > 0
if vlength < 0.1: # almost no response, set to 0
vec = np.zeros(3, dtype=vec3ds.dtype)
else:
vec = vec / vlength # unit vector
selected_PAF_array.append(vec)
count_vec += 1
if objtype == 0 and PAF_type == 1 and ic >= 20:
continue
coord3d_pred_v[conn[1]] = coord3d_pred_v[conn[0]] + default_PAF_length[objtype][ic] * vec
return coord3d_pred_v, np.array(selected_PAF_array)
def connection_score_2d(A, B, PAF):
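    # OpenPose-style line-integral score: average inner product between the 2D PAF
    # sampled at 11 evenly spaced points on segment A->B and the unit direction A->B.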
AB = (B - A).astype(np.float32)
if not AB.any():
# A B coincides
return 0.1
AB /= nl.norm(AB.astype(np.float32))
s = PAF.shape
assert len(s) == 3
u = np.linspace(0.0, 1.0, num=11)
v = 1.0 - u
points = (np.outer(A, v) + np.outer(B, u)).astype(int)
vec2ds = PAF[points[1], points[0], :2]
inner_product = np.dot(vec2ds, AB)
return np.mean(inner_product)
def detect_keypoints2d_PAF(scoremaps, PAF, objtype=0, weight_conn=1.0, mean_shift=False, prev_frame=None):
print('PAF_type {}'.format(PAF_type))
if len(scoremaps.shape) == 4:
scoremaps = np.squeeze(scoremaps)
s = scoremaps.shape
assert len(s) == 3, "This function was only designed for 3D Scoremaps."
assert (s[2] < s[1]) and (s[2] < s[0]), "Probably the input is not correct, because [H, W, C] is expected."
num_candidate = 5
local_maxs = []
for i in range(s[2]):
candidates = skimage.feature.peak_local_max(scoremaps[:, :, i], num_peaks=num_candidate)
if candidates.shape[0] < num_candidate:
# if less than that, replicate the first element
if candidates.shape[0] > 0:
candidates = np.concatenate([candidates[0][np.newaxis, :]] * (num_candidate - candidates.shape[0]) + [candidates], axis=0)
else:
candidates = np.zeros((5, 2), dtype=int)
local_maxs.append(candidates)
if objtype == 0:
root_idx = 1 # starting constructing the tree from root_idx
else:
assert objtype == 1
root_idx = 0
joint_idx_list = [root_idx]
candidate_idx_list = [[c] for c in range(num_candidate)]
sum_score_list = [scoremaps[local_maxs[root_idx][c, 0], local_maxs[root_idx][c, 1], root_idx] for c in range(num_candidate)]
if prev_frame is not None:
for c in range(num_candidate):
sum_score_list[c] -= 20 * nl.norm(local_maxs[root_idx][candidate_idx_list[c][0]][::-1] - prev_frame[c]) / (s[0] + s[1])
# dynamic programming
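    # Viterbi-style pass over the kinematic tree: for each candidate location of the child
    # joint, keep the parent chain maximising (scoremap score + weight_conn * pairwise PAF
    # connection score), optionally penalised by distance to the previous frame's keypoints.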
for iconn, conn in enumerate(PAFConnection[objtype]):
if objtype == 0:
if PAF_type == 0:
if iconn in (9, 13) or iconn >= 21: # unused PAF connection
continue
elif PAF_type == 1:
if iconn in (9, 13) or iconn >= 20:
continue
joint_idx_list.append(conn[1])
candidates = local_maxs[conn[1]]
new_candidate_idx_list = []
new_sum_score_list = []
for ican, candidate in enumerate(candidates):
best_sum_score = -np.inf
best_candidate_idx = None
B = candidate[::-1]
for candidate_idx, sum_score in zip(candidate_idx_list, sum_score_list):
parent_idx = conn[0]
parent_candidate_idx = candidate_idx[joint_idx_list.index(parent_idx)]
A = local_maxs[parent_idx][parent_candidate_idx][::-1]
connection_score = connection_score_2d(A, B, PAF[:, :, 3 * iconn:3 * iconn + 3])
new_sum_score = sum_score + scoremaps[candidate[0], candidate[1], conn[1]] + weight_conn * connection_score # TODO
if prev_frame is not None:
new_sum_score -= 20 * nl.norm(prev_frame[conn[1]] - B) / (s[0] + s[1])
if new_sum_score > best_sum_score:
best_sum_score = new_sum_score
best_candidate_idx = candidate_idx
assert best_candidate_idx is not None
new_sum_score_list.append(best_sum_score)
new_candidate_idx_list.append(best_candidate_idx + [ican])
sum_score_list = new_sum_score_list
candidate_idx_list = new_candidate_idx_list
best_candidate_idx = candidate_idx_list[np.argmax(sum_score_list)]
best_candidate_idx_joint_order = np.zeros_like(best_candidate_idx)
best_candidate_idx_joint_order[np.array(joint_idx_list, dtype=int)] = best_candidate_idx
best_candidate = np.array([local_maxs[i][j] for i, j in enumerate(best_candidate_idx_joint_order)])
coord2d = best_candidate[:, ::-1]
if objtype == 0:
assert coord2d.shape[0] == 19 or coord2d.shape[0] == 20
if objtype == 1:
assert coord2d.shape[0] == 21
scores = []
for i in range(coord2d.shape[0]):
scores.append(scoremaps[coord2d[i, 1], coord2d[i, 0], i])
if mean_shift:
dWidth = 3
dHeight = 3
new_coord2d = []
for i in range(coord2d.shape[0]):
x1 = max(coord2d[i, 0] - dWidth, 0)
x2 = min(coord2d[i, 0] + dWidth + 1, s[1])
y1 = max(coord2d[i, 1] - dHeight, 0)
y2 = min(coord2d[i, 1] + dHeight + 1, s[0])
Xmap = np.arange(x1, x2)
Ymap = np.arange(y1, y2)
local_scoremap = scoremaps[y1:y2, x1:x2, i]
gt0 = (local_scoremap > 0)
if gt0.any():
pos_scoremap = gt0 * local_scoremap
                xAcc = np.sum(pos_scoremap * Xmap)
import collections
import os
import random
from typing import Deque
import gym
import numpy as np
from cartpoleA2CNN import Actor
from cartpoleA2CNN import Critic
PROJECT_PATH = os.path.abspath("C:/Users/Jan/Dropbox/_Coding/UdemyAI")
MODELS_PATH = os.path.join(PROJECT_PATH, "models")
ACTOR_PATH = os.path.join(MODELS_PATH, "actor_cartpole.h5")
CRITIC_PATH = os.path.join(MODELS_PATH, "critic_cartpole.h5")
class Agent:
def __init__(self, env: gym.Env):
self.env = env
self.num_observations = self.env.observation_space.shape
self.num_actions = self.env.action_space.n
self.num_values = 1
self.gamma = 0.95
self.learning_rate_actor = 1e-3 # 0.001
self.learning_rate_critic = 5e-3 # 0.005
self.actor = Actor(self.num_observations, self.num_actions, self.learning_rate_actor)
self.critic = Critic(self.num_observations, self.num_values, self.learning_rate_critic)
def get_action(self, state: np.ndarray):
policy = self.actor(state)[0]
        action = np.random.choice(self.num_actions, p=policy)
        return action
import itertools
from collections import OrderedDict, Iterable
from functools import wraps
from nltk import flatten
from nltk.corpus import wordnet
from nltk.corpus.reader import Synset
from nltk.stem import PorterStemmer
from overrides import overrides
from xnym_embeddings.dict_tools import balance_complex_tuple_dict, invert_dict
from sklearn.preprocessing import Normalizer
from allennlp.modules.token_embedders.token_embedder import TokenEmbedder
from allennlp.data import Vocabulary
from xnym_embeddings.time_tools import timeit_context
import numpy as np
import torch
from multiprocessing import Pool
#import pywsd
#Wordnet sense disambiguation
def rolling_window_lastaxis(a, window):
"""Directly taken from <NAME> post to numpy-discussion.
<http://www.mail-archive.com/[email protected]/msg29450.html>"""
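    # e.g. for a.shape == (2, 5) and window == 3 the result has shape (2, 3, 3),
    # where result[i, j] is the window a[i, j:j+3] (a strided view, not a copy)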
if window < 1:
raise ValueError ("`window` must be at least 1.")
if window > a.shape[-1]:
raise ValueError ("`window` is too long.")
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def search_in_rowling(M, single_sequence):
return np.where(
np.all
(np.logical_xor(
M == single_sequence,
np.isnan(single_sequence)),
axis=2
))
def last_nonzero(arr, axis, invalid_val=-1):
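    # For each 1-D slice taken along `axis`, return the index of the last non-zero element,
    # or `invalid_val` if the slice contains no non-zero entries.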
mask = arr!=0
val = arr.shape[axis] - np.flip(mask, axis=axis).argmax(axis=axis) - 1
return np.where(mask.any(axis=axis), val, invalid_val)
def search_sequence_numpy(arr,seq):
""" Find arrays in arrays at arbitrary position on second axis
Multiple occurrences in a sample are given with recurrent sample indices and other positions in the samples
:param arr: 2d array to look in
:param seq: 2d array to look from; padding with nan allows to compare sequences with minor length
:return: list of tuples of arrays with shape: length of seq * shape[0] of arr * shape[1] of arr
no. of sample positions in samples
"""
# compute strides from samples with length of seqs
len_sequences = seq.shape[1]
M = rolling_window_lastaxis(arr, len_sequences)
# check if they match these smaller sequences
matched_xnyms = list(search_in_rowling(M,s) for s in seq)
# return the index of the matched word, the indices of the samples, where it was found and the positions within these samples
for xnym_index, (sample_indices, position_indices) in enumerate(matched_xnyms):
if len(sample_indices)>0:
yield xnym_index, sample_indices, position_indices
def split_multi_word(word):
return tuple(word.split('-') if '-' in word else word.split('_'))
def parametrized(dec):
def layer(*args, **kwargs):
def repl(f):
return dec(f, *args, **kwargs)
return repl
return layer
wordnet_lookers = {}
@parametrized
def wordnet_looker(fun, kind):
wordnet_lookers[kind] = fun
@wraps(fun)
def aux(*xs, **kws):
return fun(*xs, **kws)
return aux
@wordnet_looker('hyponyms')
def get_hyponyms(synset, depth=0, max_depth=0):
if depth > max_depth:
return set(synset.hyponyms())
hyponyms = set()
for hyponym in synset.hyponyms():
hyponyms |= set(get_hyponyms(hyponym, depth=depth+1))
return hyponyms | set(synset.hyponyms())
@wordnet_looker('cohyponyms')
def get_cohyponyms(synset):
""" Cohyponyms are for exmaple:
Dog, Fish, Insect, because all are animals, as red and blue, because they are colors.
"""
cohyponyms = set()
for hypernym in synset.hypernyms():
cohyponyms |= set(hypernym.hyponyms())
return cohyponyms - set([synset])
@wordnet_looker('cohypernyms')
def get_cohypernyms(synset):
""" Cohypernyms are for exmaple:
A Legal Document and a Testimony are cohypernyms, because what is a Legal Document is possibly not a Testimony and
vice versa, but also that may possibly be the case.
Dog, Fish, Insect are no cohypernyms, because there is no entity, that is at the same time a Dog and a Fisch or an
Insect.
"""
cohypernyms = set()
for hyponym in synset.hyponyms():
cohypernyms |= set(hyponym.hypernyms())
return cohypernyms - set([synset])
@wordnet_looker('hypernyms')
def get_hypernyms(synset):
hypernyms = set()
for hyponym in synset.hypernyms():
hypernyms |= set(get_hypernyms(hyponym))
result_syns = hypernyms | set(synset.hypernyms())
result = set(flatten([list(x.lemmas()) if isinstance(x, Synset) else x for x in result_syns]))
return result
@wordnet_looker('antonyms')
def get_antonyms(synset):
antonyms = set()
new_antonyms = set()
for lemma in synset.lemmas():
new_antonyms |= set(lemma.antonyms())
antonyms |= new_antonyms
for antonym in new_antonyms:
antonyms |= set(flatten([list(x.lemmas()) for x in antonym.synset().similar_tos()]))
return antonyms
@wordnet_looker('synonyms')
def get_synonyms(synset):
synonyms = set(synset.lemmas())
return synonyms
porter = PorterStemmer()
def wordnet_lookup_xnyms (index_to_tokens, fun):
xnym_dict = OrderedDict()
lemma_vocab = set (porter.stem(word) for word in index_to_tokens.values())
for token in lemma_vocab:
xnyms_syns = set()
for syn in wordnet.synsets(token):
xnyms_syns |= fun(syn)
lemmas = set(flatten([list(x.lemmas()) if isinstance(x, Synset) else x for x in xnyms_syns]))
strings = [split_multi_word(x.name()) for x in lemmas]
xnym_dict[(token,)] = strings
return xnym_dict
def numerize(d, token2index):
number_dict = OrderedDict()
for key, val in d.items():
if isinstance(key, Iterable):
new_key = type(key)([token2index[t] for t in key if t in token2index])
else:
new_key = type(key)(token2index[key])
new_vals = []
for var in val:
if isinstance(var, Iterable):
new_val = type(var)([token2index[t] for t in var if t in token2index])
if not new_val:
continue
else:
new_val = type(var)(token2index[var])
new_vals.append(new_val)
if not new_vals or not new_key:
continue
number_dict[new_key] = new_vals
return number_dict
@TokenEmbedder.register("xnym_embedder")
class XnymEmbedder (TokenEmbedder):
"""
Represents a sequence of tokens as a relation based embeddings.
Each sequence gets a vector of length vocabulary size, where the i'th entry in the vector
corresponds to number of times the i'th token in the vocabulary appears in the sequence.
By default, we ignore padding tokens.
Parameters
----------
vocab: ``Vocabulary``
projection_dim : ``int``, optional (default = ``None``)
if specified, will project the resulting bag of positions representation
to specified dimension.
ignore_oov : ``bool``, optional (default = ``False``)
If true, we ignore the OOV token.
"""
def __init__(self,
vocab: Vocabulary,
projection_dim: int = 10,
xnyms:str='antonyms',
normalize=True,
sparse=True,
parallelize=False,
numerize_dict=True):
super(XnymEmbedder, self).__init__()
self.xnyms = xnyms
self.S = None
with timeit_context('creating %s-dict' % self.xnyms):
self.vocab = vocab
self.parallelize = parallelize
xnyms_looker_fun = wordnet_lookers[xnyms]
self.xnym_dict = wordnet_lookup_xnyms(vocab._index_to_token['tokens'], fun=xnyms_looker_fun)
self.xnym_dict[('in', 'common',)] = [('differ',), ('differs',)]
self.xnym_dict[('equivocally',)] = [('univocally',)]
self.xnym_dict[('micronutrients',)] = [('macronutrients',)]
self.xnym_dict = balance_complex_tuple_dict(self.xnym_dict)
if numerize_dict:
self.xnym_dict = numerize(self.xnym_dict, vocab.get_token_to_index_vocabulary())
self.normalize = normalize
self.sparse = sparse
self.output_dim = projection_dim
xnym_keys = list(self.xnym_dict.keys())
length = max(map(len, xnym_keys))
self.xnyms_keys = np.array([list(xi) + [np.nan] * (length - len(xi)) for xi in xnym_keys])
self.xnyms_counterparts = self.generate_xnym_counterparts(self.xnym_dict.values())
self.xnyms_keys_len_groups = [(l, list(g)) for l, g in
itertools.groupby(
sorted(self.xnym_dict.items(),
key=lambda x:len(x[0])),
key=lambda x:len(x[0]))]
#self.xnyms_counterparts_len_groups = [self.generate_xnym_counterparts(group.values()) for group in self.xnyms_keys_len_groups]
def generate_xnym_counterparts(self, values):
xnyms_counterparts = []
xnym_counterpars = list(values)
for ac in xnym_counterpars:
length = max(map(len, ac))
counterparts = np.array([list(xi) + [np.nan] * (length - len(xi)) for xi in ac])
xnyms_counterparts.append(counterparts)
return np.array(xnyms_counterparts)
def position_distance_embeddings(self, input_array):
where_xnyms_match = list(search_sequence_numpy(input_array, self.xnyms_keys))
for x1_index, s1_indices, p1_index in where_xnyms_match:
where_counterpart_matches = list(search_sequence_numpy(input_array[s1_indices], self.xnyms_counterparts[x1_index]))
for _, s2_indices, p2_index in where_counterpart_matches:
both_containing_samples = s1_indices[s2_indices]
both_containing_positions = p1_index[s2_indices]
difference = np.fabs(both_containing_positions - p2_index)
if difference.any():
index_sample_token1 = (both_containing_samples, both_containing_positions)
index_sample_token2 = (s1_indices[s2_indices], p2_index)
                    occurrences = np.column_stack(index_sample_token1 + index_sample_token2)
import matplotlib
matplotlib.use('Agg') # for plotting without GUI
import matplotlib.pyplot as plt
import time
import os
import math
import pandas as pd
import numpy as np
from scipy.stats.stats import pearsonr
import tensorflow as tf
import collections
import scipy.sparse as sp_sparse
import tables
from sklearn.decomposition import PCA
# from sklearn.manifold import TSNE # single core
from MulticoreTSNE import MulticoreTSNE as TSNE # MCORE
# Sys
def usage():
process = psutil.Process(os.getpid())
ram = process.memory_info()[0] / float(2 ** 20)
ram = round(ram, 1)
return ram
# DATA I/O # todo: check gene_id barcode uniqueness
def read_csv(fname):
'''read_csv into pd.df, assuming index_col=0, and header=True'''
print('reading ', fname)
tic = time.time()
df = pd.read_csv(fname, index_col=0)
# print("read matrix: [genes, cells]")
print('shape:', df.shape)
# print(df.axes)
if df.shape[0] > 2 and df.shape[1] > 2:
print(df.ix[0:3, 0:2])
toc = time.time()
print("reading took {:.1f} seconds".format(toc - tic))
return df
def read_tsv(fname):
'''read_csv into pd.df, assuming index_col=0, and header=True'''
print('reading ', fname)
tic = time.time()
df = pd.read_csv(fname, index_col=0, delimiter='\t')
# print("read matrix: [genes, cells]")
print('shape:', df.shape)
# print(df.axes)
if df.shape[0] > 2 and df.shape[1] > 2:
print(df.ix[0:3, 0:2])
toc = time.time()
print("reading took {:.1f} seconds".format(toc - tic))
return df
def save_csv(arr, fname):
'''if fname=x.csv.gz, will be compressed
if fname=x.csv, will not be compressed'''
tic = time.time()
print('saving: ', arr.shape)
np.savetxt(fname, arr, delimiter=',', newline='\n')
toc = time.time()
print("saving" + fname + " took {:.1f} seconds".format(toc - tic))
def save_hd5(df, out_name):
tic = time.time()
print('saving: ', df.shape)
df.to_hdf(out_name, key='null', mode='w', complevel=9, complib='blosc')
toc = time.time()
print("saving" + out_name + " took {:.1f} seconds".format(toc - tic))
def read_hd5(in_name):
'''
:param in_name:
:return df:
'''
print('reading: ', in_name)
df = pd.read_hdf(in_name)
print('read', df.shape)
# print(df.axes)
if df.shape[0] > 2 and df.shape[1] > 2:
print(df.ix[0:3, 0:2])
return df
GeneBCMatrix = collections.namedtuple(
'GeneBCMatrix',
['gene_ids', 'gene_names', 'barcodes', 'matrix'])
def read_sparse_matrix_from_h5(fname, genome, file_ori):
'''
for 10x_genomics h5 file:
always transpose into cell_row if gene_row is the input
https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/advanced/h5_matrices
:return: cell_row sparse matrix
:param fname:
:param genome:
:return:
'''
tic = time.time()
print('reading {} {}'.format(fname, genome))
with tables.open_file(fname, 'r') as f:
try:
dsets = {}
for node in f.walk_nodes('/' + genome, 'Array'):
dsets[node.name] = node.read()
matrix = sp_sparse.csc_matrix(
(dsets['data'], dsets['indices'], dsets['indptr']),
shape=dsets['shape'])
print('shape is {}'.format(matrix.shape))
if file_ori == 'cell_row':
pass
elif file_ori == 'gene_row':
matrix = matrix.transpose()
else:
raise Exception('file orientation {} not recognized'.format(file_ori))
obj = GeneBCMatrix(dsets['genes'], dsets['gene_names'],
dsets['barcodes'], matrix)
nz_count = len(obj.matrix.nonzero()[0])
nz_rate = nz_count / (obj.matrix.shape[0] * obj.matrix.shape[1])
nz_rate = round(nz_rate, 3)
print('nz_rate is {}'.format(nz_rate))
print('nz_count is {}\n'.format(nz_count))
toc = time.time()
print("reading took {:.1f} seconds".format(toc - tic))
return obj
except tables.NoSuchNodeError:
raise Exception("Genome %s does not exist in this file." % genome)
except KeyError:
raise Exception("File is missing one or more required datasets.")
def save_sparse_matrix_to_h5(gbm, filename, genome):
'''
for 10x_genomics h5 file:
https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/advanced/h5_matrices
:return:
:param filename:
:param genome:
:return:
'''
flt = tables.Filters(complevel=1)
print('saving: ', gbm.matrix.shape)
with tables.open_file(filename, 'w', filters=flt) as f:
try:
group = f.create_group(f.root, genome)
f.create_carray(group, 'genes', obj=gbm.gene_ids)
f.create_carray(group, 'gene_names', obj=gbm.gene_names)
f.create_carray(group, 'barcodes', obj=gbm.barcodes)
f.create_carray(group, 'data', obj=gbm.matrix.data)
f.create_carray(group, 'indices', obj=gbm.matrix.indices)
f.create_carray(group, 'indptr', obj=gbm.matrix.indptr)
f.create_carray(group, 'shape', obj=gbm.matrix.shape)
except:
raise Exception("Failed to write H5 file.")
def read_data_into_cell_row(fname, orientation='cell_row', genome='mm10'):
'''
read hd5 or csv, into cell_row format
:param fname:
:param orientation: of file
:return: cell_row df
'''
tic = time.time()
print('reading {} into cell_row data frame'.format(fname))
if fname.endswith('hd5'):
df_tmp = read_hd5(fname)
elif fname.endswith('csv'):
df_tmp = read_csv(fname)
elif fname.endswith('tsv'):
df_tmp = read_tsv(fname)
elif fname.endswith('csv.gz'):
df_tmp = read_csv(fname)
elif fname.endswith('h5'): # not hd5
df_tmp = read_sparse_matrix_from_h5(fname, genome=genome, file_ori=orientation)
print('sparse_matrix have been read')
else:
raise Exception('file name not ending in hd5 nor csv, not recognized')
if orientation == 'gene_row':
df_tmp = df_tmp.transpose()
elif orientation == 'cell_row':
pass
else:
raise Exception('parameter err: for {}, orientation {} not correctly spelled'.format(fname, orientation))
#print("after transpose into cell row (if correct file_orientation provided)")
if fname.endswith('h5'):
print("shape is {}".format(df_tmp.matrix.shape))
else:
print("shape is {}".format(df_tmp.shape))
print('nz_rate is {}'.format(nnzero_rate_df(df_tmp)))
print('nz_count is {}\n'.format(nnzero_count_df(df_tmp)))
toc = time.time()
print("reading took {:.1f} seconds".format(toc - tic))
return df_tmp
# PRE-PROCESSING OF DATA FRAMES #
def df_filter(df):
df_filtered = df.loc[(df.sum(axis=1) != 0), (df.sum(axis=0) != 0)]
print("filtered out any rows and columns with sum of zero")
return df_filtered
def df_normalization(df, scale=1e6):
'''
RPM when default
:param df: [gene, cell]
:param scale:
:return:
'''
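    # counts-per-`scale` normalisation: divide each cell (column) by its total count,
    # then multiply by `scale` (scale=1e6 gives reads-per-million, RPM)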
read_counts = df.sum(axis=0) # colsum
# df_normalized = df.div(read_counts, axis=1).mul(np.median(read_counts)).mul(1)
df_normalized = df.div(read_counts, axis=1).mul(scale)
return df_normalized
def df_log10_transformation(df, pseudocount=1):
'''
log10
:param df:
:param pseudocount:
:return:
'''
df_log10 = np.log10(np.add(df, pseudocount))
return df_log10
def df_rpm_log10(df, pseudocount=1):
'''
log10
:param df: [gene, cell]
:return:
'''
df_tmp = df.copy()
df_tmp = df_normalization(df_tmp)
df_tmp = df_log10_transformation(df_tmp, pseudocount=pseudocount)
return df_tmp
def df_exp_rpm_log10(df, pseudocount=1):
'''
log10
:param df: [gene, cell]
:pseudocount: for exp transformation and log10 transformation
:return:
'''
df_tmp = df.copy()
df_tmp = np.power(10, df_tmp) - pseudocount
df_tmp = df_normalization(df_tmp)
df_tmp = df_log10_transformation(df_tmp, pseudocount=pseudocount)
return df_tmp
def df_exp_discretize_log10(df, pseudocount=1):
'''
For better comparison with ground-truth in gene-scatterplot visualization
Input should be the output of df_log10_transformation (log10(x+1))
If so, all values ≥ 0
1. 10^x-1
    2. around
3. log10(x+1)
:param df:
:param pseudocount:
:return:
'''
df_tmp = df.copy()
df_tmp = np.around(np.power(10, df_tmp) - pseudocount)
df_tmp = np.log10(df_tmp + pseudocount)
return df_tmp
def df_transformation(df, transformation='as_is'):
'''
data_transformation
df not copied
:param df: [genes, cells]
:param format: as_is, log10, rpm_log10, exp_rpm_log10
:return: df_formatted
'''
if transformation == 'as_is':
pass # do nothing
elif transformation == 'log10':
df = df_log10_transformation(df)
elif transformation == 'rpm_log10':
df = df_rpm_log10(df)
elif transformation == 'exp_rpm_log10':
        df = df_exp_rpm_log10(df)
else:
raise Exception('format {} not recognized'.format(transformation))
print('data formatting: ', transformation)
return df
def mask_df(df, nz_goal):
'''
:param df: any direction
:param nz_goal:
:return:
'''
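    # randomly zero out entries so the expected non-zero rate drops from the current
    # rate to nz_goal (each entry is kept with probability nz_goal / current_nz_rate)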
df_msked = df.copy()
nz_now = nnzero_rate_df(df)
nz_goal = nz_goal/nz_now
zero_goal = 1-nz_goal
df_msked = df_msked.where(np.random.uniform(size=df.shape) > zero_goal, 0)
return df_msked
def multinormial_downsampling(in_df, libsize_out):
out_df = in_df.copy()
for i in range(len(in_df)):
slice_arr = in_df.values[i, :]
libsize = slice_arr.sum()
p_lst = slice_arr / libsize
slice_resample = np.random.multinomial(libsize_out, p_lst)
out_df.ix[i, :] = slice_resample
return out_df
def split_arr(arr, a=0.8, b=0.1, c=0.1, seed_var=1):
"""input array, output rand split arrays
a: train, b: valid, c: test
e.g.: [arr_train, arr_valid, arr_test] = split(df.values)"""
print(">splitting data")
np.random.seed(seed_var) # for splitting consistency
train_indices = np.random.choice(arr.shape[0], int(round(arr.shape[0] * a // (a + b + c))), replace=False)
remain_indices = np.array(list(set(range(arr.shape[0])) - set(train_indices)))
valid_indices = np.random.choice(remain_indices, int(round(len(remain_indices) * b // (b + c))), replace=False)
test_indices = np.array(list(set(remain_indices) - set(valid_indices)))
np.random.seed() # cancel seed effect
print("total samples being split: ", len(train_indices) + len(valid_indices) + len(test_indices))
print('train:', len(train_indices), ' valid:', len(valid_indices), 'test:', len(test_indices))
arr_train = arr[train_indices]
arr_valid = arr[valid_indices]
arr_test = arr[test_indices]
return (arr_train, arr_valid, arr_test)
def split_df(df, a=0.8, b=0.1, c=0.1, seed_var=1):
"""input df, output rand split dfs
a: train, b: valid, c: test
e.g.: [df_train, df2, df_test] = split(df, a=0.7, b=0.15, c=0.15)"""
np.random.seed(seed_var) # for splitting consistency
train_indices = np.random.choice(df.shape[0], int(df.shape[0] * a // (a + b + c)), replace=False)
remain_indices = np.array(list(set(range(df.shape[0])) - set(train_indices)))
valid_indices = np.random.choice(remain_indices, int(len(remain_indices) * b // (b + c)), replace=False)
test_indices = np.array(list(set(remain_indices) - set(valid_indices)))
np.random.seed() # cancel seed effect
print("total samples being split: ", len(train_indices) + len(valid_indices) + len(test_indices))
print('train:', len(train_indices), ' valid:', len(valid_indices), 'test:', len(test_indices))
df_train = df.ix[train_indices, :]
df_valid = df.ix[valid_indices, :]
df_test = df.ix[test_indices, :]
return df_train, df_valid, df_test
def random_subset_arr(arr, m_max, n_max):
[m, n] = arr.shape
m_reduce = min(m, m_max)
n_reduce = min(n, n_max)
np.random.seed(1201)
row_rand_idx = np.random.choice(m, m_reduce, replace=False)
col_rand_idx = np.random.choice(n, n_reduce, replace=False)
np.random.seed()
arr_sub = arr[row_rand_idx][:, col_rand_idx]
print('matrix from [{},{}] to a random subset of [{},{}]'.
format(m, n, arr_sub.shape[0], arr_sub.shape[1]))
return arr_sub
def subset_df(df_big, df_subset):
return (df_big.ix[df_subset.index, df_subset.columns])
def sparse_matrix_transformation(csr_matrix, transformation='log10'):
'''
data_transformation
df not copied
:param csr_matrix:
:param transformation: as_is, log10
:return:
'''
if transformation == 'as_is':
pass # do nothing
elif transformation == 'log10':
csr_matrix = csr_matrix.log1p()
elif transformation == 'rpm_log10':
raise Exception('rpm_log10 not implemented yet')
elif transformation == 'exp_rpm_log10':
raise Exception('exp_rpm_log10 not implemented yet')
else:
raise Exception('format {} not recognized'.format(transformation))
print('data tranformation: ', transformation)
return csr_matrix
def subsample_matrix(gbm, barcode_indices):
return GeneBCMatrix(gbm.gene_ids, gbm.gene_names,
gbm.barcodes[barcode_indices],
gbm.matrix[:, barcode_indices])
def subgene_matrix(gbm, gene_indices):
return GeneBCMatrix(gbm.gene_ids[gene_indices], gbm.gene_names[gene_indices],
gbm.barcodes,
gbm.matrix[gene_indices, :])
def get_expression(gbm, gene_name):
gene_indices = np.where(gbm.gene_names == gene_name)[0]
if len(gene_indices) == 0:
raise Exception("%s was not found in list of gene names." % gene_name)
return gbm.matrix[gene_indices[0], :].toarray().squeeze()
def split__csr_matrix(csr_matrix, a=0.8, b=0.1, c=0.1, seed_var=1):
"""
input: csr_matrix(cell_row),
output: rand split datasets (train/valid/test)
a: train
b: valid
c: test
e.g. [csr_train, csr_valid, csr_test] = split(df.values)"""
print(">splitting data..")
np.random.seed(seed_var) # for splitting consistency
[m, n] = csr_matrix.shape
train_indices = np.random.choice(m, int(round(m*a//(a+b+c))), replace=False)
remain_indices = np.array(list(set(range(m)) - set(train_indices)))
valid_indices = np.random.choice(remain_indices, int(round(len(remain_indices)*b//(b + c))), replace=False)
test_indices = np.array(list(set(remain_indices) - set(valid_indices)))
np.random.seed() # cancel seed effect
print("total samples being split: ", len(train_indices) + len(valid_indices) + len(test_indices))
print('train:', len(train_indices), ' valid:', len(valid_indices), 'test:', len(test_indices))
csr_train = csr_matrix[train_indices, :]
csr_valid = csr_matrix[valid_indices, :]
csr_test = csr_matrix[test_indices, :]
return (csr_train, csr_valid, csr_test, train_indices, valid_indices, test_indices)
# STAT CALCULATION #
def nnzero_rate_df(df):
idx = df != 0
nnzero_rate = round(sum(sum(idx.values)) / df.size, 3)
return nnzero_rate
def nnzero_count_df(df):
idx = df != 0
nnzero_count = sum(sum(idx.values))
return nnzero_count
def mean_df(df):
Sum = sum(sum(df.values))
Mean = Sum / df.size
return (Mean)
def square_err(arr1, arr2):
'''
arr1 and arr2 of same shape, return squared err between them
arr and df both works
'''
diff = np.subtract(arr1, arr2)
square_err_ = np.sum(np.power(diff, 2))
count = int(arr1.shape[0] * arr1.shape[1])
return square_err_, count
def square_err_omega(arr, arr_ground_truth):
'''
input: arr and arr_ground_truth of same shape
return: squared err omega (excluding zeros in ground truth)
arr and df both works
only zeros are ignored, negatives should not show up
'''
omega = np.sign(arr_ground_truth)
diff = np.subtract(arr, arr_ground_truth)
square_err_ = np.power(diff, 2)
square_err_nz = np.sum(np.multiply(square_err_, omega))
count = int(arr.shape[0] * arr.shape[1])
return square_err_nz, count
def mse_omega(arr_h, arr_m):
'''arr and df both works'''
omega = np.sign(arr_m) # if x>0, 1; elif x == 0, 0;
diff = np.subtract(arr_h, arr_m)
squared = np.power(diff, 2)
non_zero_squared = np.multiply(squared, omega)
mse_omega = np.mean(np.mean(non_zero_squared))
return mse_omega
def mse(arr_h, arr_m):
'''MSE between H and M'''
diff = np.subtract(arr_h, arr_m)
squared = np.power(diff, 2)
mse = np.mean(np.mean(squared))
return mse
def nz_std(X, Y):
'''
Goal: Evaluate gene-level imputation with STD of non-zero values of that gene
Takes two cell_row DFs, X and Y, with same shape
Calculate STD for each column(gene)
Treating zeros in X as Nones, And corresponding values in Y as Nones, too
:param X: Input cell_row matrix
:param Y: Imputation cell_row matrix
:return: two list of NZ_STDs, used for evaluation of imputation
'''
idx_zeros = (X == 0)
X_ = X.copy()
Y_ = Y.copy()
X_[idx_zeros] = None
Y_[idx_zeros] = None
return (X_.std(), Y_.std())
def nz2_corr(x, y):
'''
the nz2_corr between two vectors, excluding any element with zero in either vectors
:param x: vector1
:param y: vector2
:return:
'''
nas = np.logical_or(x == 0, y == 0)
result = pearson_cor(x[~nas], y[~nas])
if not math.isnan(result):
result = round(result, 4)
return result
def gene_mse_nz_from_df(Y, X):
'''
get gene_mse from gene_expression_df (cell_row, with cell_id as index)
X: input/ground-truth
Y: imputation
return a [gene, 1] pd.series with index of gene_ids
'''
mse_df = pd.DataFrame(columns=['gene_name'])
for i in range(X.shape[1]):
        mse_ = mse_omega(Y.iloc[:, i], X.iloc[:, i])
gene_name = X.columns[i]
mse_df.loc[X.columns[i], 'gene_name']= mse_
mse_df = mse_df.iloc[:, 0]
print(mse_df.head(), '\n', mse_df.shape)
return mse_df
def combine_gene_imputation_of_two_df(Y1, Y2, metric1, metric2, mode='smaller'):
'''
Y1, Y2: two imputation results (cell_row, df)
Metric1, Metric2: [num-gene, 1], df, same metircs for Y1 and Y2, e.g. MSE, SD
select rows of Y1, Y2 into Y_combined
mode: smaller/larger (being selected), e.g. smaller MSE, larger SD
Output in index/column order of Y1
'''
if mode == 'smaller':
idx_better = metric1 < metric2
elif mode == 'larger':
idx_better = metric1 > metric2
else:
raise Exception('mode err')
# try:
# idx_better = idx_better.iloc[:, 0] # df to series, important
# except 'IndexingError':
# pass
print('yg_better boolean series:\n', idx_better.head())
Y_better_lst = [Y1.transpose()[idx_better],
Y2.transpose()[~idx_better]] # list of frames
Y_better = pd.concat(Y_better_lst)
Y_better = Y_better.transpose() # tr back
Y_better = Y_better.loc[
Y1.index, Y1.columns] # get Y1 original order, just in case
print('Y1:\n', Y1.iloc[:5, :3])
print('Y2:\n', Y2.iloc[:5, :3])
print("metrics1:\n", metric1.iloc[:5])
print("metrics2:\n", metric2.iloc[:5])
print('Y_combined:\n', Y_better.iloc[:5, :3])
return Y_better
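# Hedged usage sketch: the per-gene metrics decide which imputation wins each
# column; with mode='smaller', gene g1 comes from Y1 and g2 from Y2 here.
def _demo_combine_gene_imputation():
    Y1 = pd.DataFrame([[1.0, 9.0], [1.0, 9.0]], columns=['g1', 'g2'])
    Y2 = pd.DataFrame([[9.0, 1.0], [9.0, 1.0]], columns=['g1', 'g2'])
    metric1 = pd.Series([0.1, 0.9], index=['g1', 'g2'])
    metric2 = pd.Series([0.9, 0.1], index=['g1', 'g2'])
    return combine_gene_imputation_of_two_df(Y1, Y2, metric1, metric2,
                                             mode='smaller')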
# PLOTS #
def refresh_logfolder(log_dir):
'''delete and recreate log_dir'''
if tf.gfile.Exists(log_dir):
tf.gfile.DeleteRecursively(log_dir)
print(log_dir, "deleted")
tf.gfile.MakeDirs(log_dir)
print(log_dir, 'created\n')
def max_min_element_in_arrs(arr_list):
'''input a list of np.arrays
e.g: max_element_in_arrs([df_valid.values, h_valid])'''
max_list = []
for x in arr_list:
max_tmp = np.nanmax(x)
max_list.append(max_tmp)
max_all = np.nanmax(max_list)
min_list = []
for x in arr_list:
min_tmp = np.nanmin(x)
min_list.append(min_tmp)
min_all = np.nanmin(min_list)
return max_all, min_all
def scatterplot(x, y,
title='scatterplot', dir='plots', xlab='xlab', ylab='ylab',
alpha=1):
if not os.path.exists(dir):
os.makedirs(dir)
fname = "./{}/{}".format(dir, title)
fig = plt.figure(figsize=(5, 5))
plt.plot(x, y, 'o', alpha=alpha)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
    print('scatterplot ', title, ' done')
def scatterplot2(x, y, title='title', xlabel='x', ylabel='y', range='same', dir='plots'):
'''
    x and y are 1-D slices (e.g. pd.Series); they must be slices so that
    pearsonr(x, y)[0] works
range= same/flexible
:param x:
:param y:
:param title:
:param xlabel:
:param ylabel:
:param range:
:param dir:
:param corr:
:return:
'''
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
fprefix = "./{}/{}".format(dir, title)
# corr
corr = pearson_cor(x, y)
if not math.isnan(corr):
corr = str(round(corr, 4))
# nz2_corr
nz_corr = nz2_corr(x, y)
print('corr: {}; nz_corr: {}'.format(corr, nz_corr))
# density plot
from scipy.stats import gaussian_kde
# Calculate the point density
xy = np.vstack([x, y])
try:
z = gaussian_kde(xy)(xy)
# sort: dense on top (plotted last)
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
# plt
fig = plt.figure(figsize=(5, 5))
fig, ax = plt.subplots()
cax = ax.scatter(x, y, c=z, s=50, edgecolor='')
plt.colorbar(cax)
except np.linalg.linalg.LinAlgError:
plt.plot(x, y, 'b.', alpha=0.3)
plt.title('{}\ncorr: {}; corr-nz: {}'.format(title, corr, nz_corr)) # nz2
plt.xlabel(xlabel + "\nmean: " + str(round(np.mean(x), 2)))
plt.ylabel(ylabel + "\nmean: " + str(round(np.mean(y), 2)))
    if range == 'same':
        max, min = max_min_element_in_arrs([x, y])
        plt.xlim(min, max)
        plt.ylim(min, max)
    elif range == 'flexible':
        pass
else:
plt.xlim(range[0], range[1])
plt.ylim(range[0], range[1])
plt.savefig(fprefix + '.png', bbox_inches='tight')
plt.close('all')
def density_plot(x, y,
title='density plot', dir='plots', xlab='x', ylab='y'):
'''x and y must be arr [m, 1]'''
from scipy.stats import gaussian_kde
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
fname = "./{}/{}".format(dir, title)
# Calculate the point density
xy = np.vstack([x, y])
z = gaussian_kde(xy)(xy)
# sort: dense on top (plotted last)
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
# plt
fig = plt.figure(figsize=(5, 5))
fig, ax = plt.subplots()
cax = ax.scatter(x, y, c=z, s=50, edgecolor='')
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.colorbar(cax)
plt.savefig(fname + ".png", bbox_inches='tight')
plt.close(fig)
def gene_pair_plot(df, list, tag, dir='./plots'):
'''
scatterplot2 of two genes in a df
:param df: [cells, genes]
:param list: [2, 3] OR [id_i, id_j]
:param tag: output_tag e.g. 'PBMC'
:param dir: output_dir
:return:
'''
for i, j in list:
print('gene_pair: ', i, type(i), j, type(j))
try:
x = df.ix[:, i]
y = df.ix[:, j]
except KeyError:
print('KeyError: the gene index does not exist')
continue
scatterplot2(x, y,
title='Gene' + str(i) + ' vs Gene' + str(j) + '\n' + tag,
xlabel='Gene' + str(i), ylabel='Gene' + str(j),
dir=dir)
def cluster_scatterplot(df2d, labels, title):
'''
PCA or t-SNE 2D visualization
`cluster_scatterplot(tsne_projection, cluster_info.Cluster.values.astype(int),
title='projection.csv t-SNE')`
:param df2d: PCA or t-SNE projection df, cell as row, feature as columns
:param labels:
:param title:
:return:
'''
legends = np.unique(labels)
print('all labels:', legends)
fig = plt.figure(figsize=(8, 6))
ax = plt.subplot(111)
for i in legends:
_ = df2d.iloc[labels == i]
num_cells = str(len(_))
percent_cells = str(round(int(num_cells) / len(df2d) * 100, 1)) + '%'
ax.scatter(_.iloc[:, 0], _.iloc[:, 1],
alpha=0.5, marker='.',
label='c' + str(i) + ':' + num_cells + ', ' + percent_cells
)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title(title)
plt.xlabel('legend format: cluster_id:num-cells')
plt.savefig(title + '.png', bbox_inches='tight')
plt.show()
plt.close('all')
def pca_tsne(df_cell_row, cluster_info=None, title='data', dir='plots',
num_pc=50, num_tsne=2, ncores=8):
'''
PCA and tSNE plots for DF_cell_row, save projections.csv
:param df_cell_row: data matrix, features as columns, e.g. [cell, gene]
:param cluster_info: cluster_id for each cell_id
:param title: figure title, e.g. Late
:param num_pc: 50
:param num_tsne: 2
:return: tsne_df, plots saved, pc_projection.csv, tsne_projection.csv saved
'''
if not os.path.exists(dir):
os.makedirs(dir)
title = './'+dir+'/'+title
df = df_cell_row
if cluster_info is None:
cluster_info = pd.DataFrame(0, index=df.index, columns=['cluster_id'])
tic = time.time()
# PCA
pca = PCA(n_components=num_pc)
pc_x = pca.fit_transform(df)
df_pc_df = pd.DataFrame(data=pc_x, index=df.index, columns=range(num_pc))
df_pc_df.index.name = 'cell_id'
df_pc_df.columns.name = 'PC'
df_pc_df.to_csv(title+'.pca.csv')
print('dim before PCA', df.shape)
print('dim after PCA', df_pc_df.shape)
print('explained variance ratio: {}'.format(
sum(pca.explained_variance_ratio_)))
colors = cluster_info.reindex(df_pc_df.index)
colors = colors.dropna().iloc[:, 0]
print('matched cluster_info:', colors.shape)
print('unmatched data will be excluded from the plot') # todo: include unmatched
df_pc_ = df_pc_df.reindex(colors.index) # only plot labeled data?
cluster_scatterplot(df_pc_, colors.values.astype(str), title=title+' (PCA)')
# tSNE
print('MCORE-TSNE, with ', ncores, ' cores')
df_tsne = TSNE(n_components=num_tsne, n_jobs=ncores).fit_transform(df_pc_)
print('tsne done')
df_tsne_df = pd.DataFrame(data=df_tsne, index=df_pc_.index)
print('wait to output tsne')
df_tsne_df.to_csv(title+'.tsne.csv')
print('wrote tsne to output')
cluster_scatterplot(df_tsne_df, colors.values.astype(str), title=title+' ('
't-SNE)')
toc = time.time()
print('PCA and tSNE took {:.1f} seconds\n'.format(toc-tic))
return df_tsne_df
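# Hedged usage sketch (assumes pd, np, PCA and TSNE are imported at the top of
# this file, as pca_tsne itself requires); random data only, to show the
# expected [cell, gene] input shape.
def _demo_pca_tsne():
    df = pd.DataFrame(np.random.rand(100, 20),
                      index=['cell{}'.format(i) for i in range(100)])
    return pca_tsne(df, title='demo', num_pc=10, num_tsne=2, ncores=1)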
def heatmap_vis(arr, title='visualization of matrix in a square manner', cmap="rainbow",
vmin=None, vmax=None, xlab='', ylab='', dir='plots'):
'''heatmap visualization of 2D matrix, with plt.imshow(), in a square manner
cmap options PiYG for [neg, 0, posi]
Greys Reds for [0, max]
rainbow for [0,middle,max]'''
if not os.path.exists(dir):
os.makedirs(dir)
fname = './' + dir + '/' + title + '.vis.png'
if (vmin is None):
vmin = np.min(arr)
if (vmax is None):
vmax = np.max(arr)
fig = plt.figure(figsize=(9, 9))
plt.imshow(arr, cmap=cmap, vmin=vmin, vmax=vmax, aspect='auto')
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.colorbar()
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
print('heatmap vis ', title, ' done')
def heatmap_vis2(arr, title='visualization of matrix', cmap="rainbow",
vmin=None, vmax=None, xlab='', ylab='', dir='plots'):
'''heatmap visualization of 2D matrix, with plt.pcolor()
cmap options PiYG for [neg, 0, posi]
Greys Reds for [0, max]
rainbow for [0,middle,max]'''
if not os.path.exists(dir):
os.makedirs(dir)
fname = './' + dir + '/' + title + '.vis.png'
if (vmin is None):
vmin = np.min(arr)
if (vmax is None):
vmax = np.max(arr)
fig = plt.figure(figsize=(9, 9))
plt.pcolor(arr, cmap=cmap, vmin=vmin, vmax=vmax)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.colorbar()
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
print('heatmap vis ', title, ' done')
def curveplot(x, y, title, xlabel, ylabel, dir='plots'):
# scimpute.curveplot(epoch_log, corr_log_valid,
# title='learning_curve_pearsonr.step2.gene'+str(j)+", valid",
# xlabel='epoch',
# ylabel='Pearson corr (predction vs ground truth, valid, including cells with zero gene-j)')
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
fprefix = "./{}/{}".format(dir, title)
# plot
plt.plot(x, y)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(fprefix + '.png', bbox_inches='tight')
plt.close()
def curveplot2(x, y, z, title, xlabel, ylabel, dir='plots'):
'''curveplot2(epoch_log, train_log, valid_log, title="t", xlabel="x", ylabel="y")'''
# scimpute.curveplot2(epoch_log, corr_log_train, corr_log_valid,
# title='learning_curve_pearsonr.step2.gene'+str(j)+", train_valid",
# xlabel='epoch',
# ylabel='Pearson corr (predction vs ground truth, valid, including cells with zero gene-j)')
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
fprefix = "./{}/{}".format(dir, title)
# plot
plt.plot(x, y, label='train')
plt.plot(x, z, label='valid')
plt.legend()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(fprefix + '.png', bbox_inches='tight')
plt.close()
def hist_list(list, xlab='xlab', title='histogram', bins=100, dir='plots'):
'''output histogram of a list into png'''
if not os.path.exists(dir):
os.makedirs(dir)
fname = str(title) + '.png'
fname = "./{}/{}".format(dir, fname)
fig, ax = plt.subplots()
plt.title(title)
plt.xlabel(xlab)
plt.ylabel('Density')
hist = plt.hist(list, bins=bins, density=True)
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
print('hist of {} is done'.format(title))
return hist
def hist_arr_flat(arr, title='hist', xlab='x', ylab='Frequency', bins=100, dir='plots'):
'''create histogram for flattened arr'''
if not os.path.exists(dir):
os.makedirs(dir)
fname = "./{}/{}".format(dir, title) + '.png'
fig = plt.figure(figsize=(9, 9))
    n, bins, patches = plt.hist(arr.flatten(), bins, density=True, facecolor='green', alpha=0.75)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
print("histogram ", title, ' done')
def hist_df(df, title="hist of df", xlab='xlab', bins=100, dir='plots', range=None):
if not os.path.exists(dir):
os.makedirs(dir)
df_flat = df.values.reshape(df.size, 1)
# fig = plt.figure(figsize=(9, 9))
hist = plt.hist(df_flat, bins=bins, density=True, range=range)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel('Density')
plt.savefig('./{}/{}.png'.format(dir, title), bbox_inches='tight')
plt.close()
print('hist of ', title, 'is done')
return hist
def pearson_cor (x, y):
'''This function calculates Pearson correlation between vector x and y.
It returns nan if x or y has 2 data points or less, or does not vary
Parameters
------------
x: numpy array
y: numpy array
Return
-----------
Pearson correlation or nan
'''
if (len(x) > 2) and (x.std() > 0) and (y.std() > 0):
corr = pearsonr(x, y)[0]
else:
corr = np.nan
return corr
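# Hedged usage sketch: perfectly correlated vectors give 1.0, while a
# constant (zero-variance) vector falls back to np.nan by design.
def _demo_pearson_cor():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    assert abs(pearson_cor(x, 2 * x) - 1.0) < 1e-8
    assert math.isnan(pearson_cor(np.array([1.0, 1.0, 1.0]), x[:3]))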
def hist_2matrix_corr(arr1, arr2, mode='column-wise', nz_mode='ignore',
title='hist_corr', dir='plots'):
'''Calculate correlation between two matrices column-wise or row-wise
default: arr[cells, genes], gene-wise corr (column-wise)
assume: arr1 from benchmark matrix (e.g. input), arr2 from imputation
if corr = NaN, it will be excluded from result
mode: column-wise, row-wise
nz_mode:
ignore (all values in vectors included)
strict (zero values excluded from both vector x,y)
        first (zero values excluded from x in arr1 only)
title: 'hist_corr' or custom
dir: 'plots' or custom
'''
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
fprefix = "./{}/{}".format(dir, title)
# if arr1.shape is arr2.shape:
if mode == 'column-wise':
range_size = arr2.shape[1]
elif mode == 'row-wise':
range_size = arr2.shape[0]
else:
raise Exception('mode not recognized')
hist = []
for i in range(range_size):
if mode == 'column-wise':
x = arr1[:, i]
y = arr2[:, i]
elif mode == 'row-wise':
x = arr1[i, :]
y = arr2[i, :]
else:
raise Exception('mode not recognized')
# Pearson correlation can be calculated
# only when there are more than 2 nonzero
# values, and when the standard deviation
# is positive for both x and y
if nz_mode == 'strict':
nas = np.logical_or(x==0, y==0)
corr = pearson_cor (x[~nas], y[~nas])
elif nz_mode == 'first':
nas = (x==0)
corr = pearson_cor (x[~nas], y[~nas])
elif nz_mode == 'ignore':
corr = pearson_cor(x, y)
else:
raise Exception('nz_mode not recognized')
if not math.isnan(corr):
hist.append(corr)
print('correlation calculation completed')
hist.sort()
median_corr = round(np.median(hist), 3)
mean_corr = round(np.mean(hist), 3)
print(title)
print('median corr: {} mean corr: {}'.format(median_corr, mean_corr))
# histogram of correlation
fig = plt.figure(figsize=(5, 5))
plt.hist(hist, bins=100, density=True)
plt.xlabel('median=' + str(median_corr) + ', mean=' + str(mean_corr))
plt.ylabel('Density') #todo freq to density
plt.xlim(-1, 1)
plt.title(title)
plt.savefig(fprefix + ".png", bbox_inches='tight') #todo remove \n from out-name
plt.close(fig)
return hist
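# Hedged usage sketch: gene-wise (column-wise) correlations between a
# ground-truth matrix and a slightly noisy copy; writes plots/demo_corr.png.
def _demo_hist_2matrix_corr():
    arr1 = np.random.rand(50, 10)
    arr2 = arr1 + 0.01 * np.random.randn(50, 10)
    return hist_2matrix_corr(arr1, arr2, mode='column-wise',
                             nz_mode='ignore', title='demo_corr')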
# TF #
def variable_summaries(name, var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope(name):
# mean = tf.reduce_mean(var)
# tf.summary.scalar('mean', mean)
# with tf.name_scope('stddev'):
# stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
# tf.summary.scalar('stddev', stddev)
# tf.summary.scalar('max', tf.reduce_max(var))
# tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def weight_variable(name_scope, dim_in, dim_out, sd):
"""
define weights
:param name_scope:
:param dim_in:
:param dim_out:
:param sd:
:return:
"""
with tf.name_scope(name_scope):
W = tf.Variable(tf.random_normal([dim_in, dim_out], stddev=sd),
name=name_scope + '_W')
variable_summaries(name_scope + '_W', W)
return W
def bias_variable(name_scope, dim_out, sd):
"""
define biases
:param name_scope:
:param dim_out:
:param sd:
:return:
"""
with tf.name_scope(name_scope):
b = tf.Variable(tf.random_normal([dim_out], mean=100 * sd, stddev=sd),
name=name_scope + '_b')
variable_summaries(name_scope + '_b', b)
return b
def weight_bias_variable(name_scope, dim_in, dim_out, sd):
"""
define weights and biases
:param name_scope:
:param dim_in:
:param dim_out:
:param sd:
:return:
"""
with tf.name_scope(name_scope):
W = tf.Variable(tf.random_normal([dim_in, dim_out], stddev=sd, dtype=tf.float32),
name=name_scope + '_W')
b = tf.Variable(tf.random_normal([dim_out], mean=100 * sd, stddev=sd, dtype=tf.float32),
name=name_scope + '_b')
variable_summaries(name_scope + '_W', W)
variable_summaries(name_scope + '_b', b)
return W, b
def dense_layer(name, input, W, b, pRetain):
"""
define a layer and return output
:param name:
:param input: X_placeholder or a(l-1)
:param W: weights
:param b: biases
:param pRetain:
:return:
"""
x_drop = tf.nn.dropout(input, pRetain)
z = tf.add(tf.matmul(x_drop, W), b)
a = tf.nn.relu(z)
variable_summaries(name + '_a', a)
return a
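# Hedged usage sketch (assumes TensorFlow 1.x graph mode, matching the
# tf.random_normal / tf.name_scope calls above): stack two dense layers
# built from the helpers in this section.
def _demo_two_layer_encoder(n_input=200, n_hidden=64, pRetain=1.0):
    X = tf.placeholder(tf.float32, [None, n_input], name='X')
    W1, b1 = weight_bias_variable('encoder1', n_input, n_hidden, sd=0.01)
    a1 = dense_layer('encoder1', X, W1, b1, pRetain)
    W2, b2 = weight_bias_variable('encoder2', n_hidden, n_hidden, sd=0.01)
    a2 = dense_layer('encoder2', a1, W2, b2, pRetain)
    return X, a2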
def dense_layer_BN(name, input, W, b, pRetain, epsilon=1e-3):
"""
define a layer and return output
:param name:
:param input: X_placeholder or a(l-1)
:param W: weights
:param b: biases
:param pRetain:
:return:
"""
x_drop = tf.nn.dropout(input, pRetain)
z = tf.add(tf.matmul(x_drop, W), b)
    # BN (note: beta and scale are created here as the trainable shift/scale
    # parameters of batch normalization; the original code referenced
    # undefined names)
    batch_mean, batch_var = tf.nn.moments(z, [0])
    beta = tf.Variable(tf.zeros(b.get_shape()), name=name + '_beta')
    scale = tf.Variable(tf.ones(b.get_shape()), name=name + '_scale')
    z_bn = tf.nn.batch_normalization(z, batch_mean, batch_var, beta, scale, epsilon)
# NL
a = tf.nn.relu(z_bn)
variable_summaries(name + '_a', a)
return a
def learning_curve_mse(epoch, mse_batch, mse_valid,
title='learning curve (MSE)', xlabel='epochs', ylabel='MSE',
range=None,
dir='plots'):
"""
    deprecated
"""
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
# list to np.array, to use index
epoch = np.array(epoch)
mse_batch = np.array(mse_batch)
# mse_train = np.array(mse_train)
mse_valid = np.array(mse_valid)
# plot (full range)
fprefix = "./{}/{}".format(dir, title)
plt.plot(epoch, mse_batch, 'b--', label='mse_batch')
# plt.plot(epoch, mse_train, 'g--', label='mse_train')
plt.plot(epoch, mse_valid, 'r-', label='mse_valid')
plt.title(title)
plt.xlabel(xlabel + '\nfinal valid mse:' + str(mse_valid[-1]))
plt.ylabel(ylabel)
plt.legend()
if range is None:
max, min = max_min_element_in_arrs([mse_batch, mse_valid])
# max, min = max_min_element_in_arrs([mse_batch, mse_train, mse_valid])
plt.ylim(min, max)
else:
plt.ylim(range[0], range[1])
plt.savefig(fprefix + '.png', bbox_inches='tight')
plt.close()
# plot (no epoch0)
fprefix = "./{}/{}".format(dir, title) + '.cropped'
zoom = np.arange(1, len(mse_batch))
plt.plot(epoch[zoom], mse_batch[zoom], 'b--', label='mse_batch')
# plt.plot(epoch[zoom], mse_train[zoom], 'g--', label='mse_train')
plt.plot(epoch[zoom], mse_valid[zoom], 'r-', label='mse_valid')
plt.title(title)
plt.xlabel(xlabel + '\nfinal valid mse:' + str(mse_valid[-1]))
plt.ylabel(ylabel)
plt.legend()
if range is None:
max, min = max_min_element_in_arrs([mse_batch[zoom], mse_valid[zoom]])
# max, min = max_min_element_in_arrs([mse_batch, mse_train, mse_valid])
plt.ylim(min, max)
else:
plt.ylim(range[0], range[1])
plt.savefig(fprefix + '.png', bbox_inches='tight')
plt.close()
def learning_curve_corr(epoch, corr_batch, corr_valid,
title='learning curve (corr)',
xlabel='epochs',
ylabel='median cell-corr (100 cells)',
range=None,
dir='plots'):
"""
    deprecated
"""
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
# list to np.array, to use index
    epoch = np.array(epoch)
from ..allo_extent import (_scale_size, _s_eff, _p, _h, _d_bound, _mu, _calc_w,
_calc_f, _dim, _find_plane_z, _find_row_y,
_find_column_x, _calc_g_point_separated, _calc_Nz, get_gains)
import numpy as np
import numpy.testing as npt
# flake8: noqa
def speaker_positions(names):
P = lambda x, y, z: np.array([x, y, z])
pos = {"M+000": P( 0.0, 1.0, 0.0),
"M+SC": P(-0.414214, 1.0, 0.0),
"M-SC": P( 0.414214, 1.0, 0.0),
"M+030": P(-1.0, 1.0, 0.0),
"M-030": P( 1.0, 1.0, 0.0),
"M+060": P(-1.0, 0.414214, 0.0),
"M-060": P( 1.0, 0.414214, 0.0),
"M+090": P(-1.0, 0.0, 0.0),
"M-090": P( 1.0, 0.0, 0.0),
"M+110": P(-1.0, -1.0, 0.0), # M?110 and M?135 are treated as synonyms
"M-110": P( 1.0, -1.0, 0.0),
"M+135": P(-1.0, -1.0, 0.0),
"M-135": P( 1.0, -1.0, 0.0),
"M+180": P( 0.0, -1.0, 0.0),
"U+000": P( 0.0, 1.0, 1.0),
"U+030": P(-1.0, 1.0, 1.0), # U?030 and U?045 are treated as synonyms
"U-030": P( 1.0, 1.0, 1.0),
"U+045": P(-1.0, 1.0, 1.0),
"U-045": P( 1.0, 1.0, 1.0),
"U+090": P(-1.0, 0.0, 1.0),
"U-090": P( 1.0, 0.0, 1.0),
"U+110": P(-1.0, -1.0, 1.0), # U?110 and U?135 are treated as synonyms
"U-110": P( 1.0, -1.0, 1.0),
"U+135": P(-1.0, -1.0, 1.0),
"U-135": P( 1.0, -1.0, 1.0),
"U+180": P( 0.0, -1.0, 1.0),
"T+000": P( 0.0, 0.0, 1.0),
"B+000": P( 0.0, 1.0, -1.0),
"B+045": P(-1.0, 1.0, -1.0),
"B-045": P( 1.0, 1.0, -1.0)}
return np.array([pos[s] for s in names])
def test_allo_extent_scale_size():
# Check interpolation boundary points
npt.assert_almost_equal(_scale_size(0.0), 0.0)
npt.assert_almost_equal(_scale_size(0.2), 0.3)
npt.assert_almost_equal(_scale_size(0.5), 1.0)
npt.assert_almost_equal(_scale_size(0.75), 1.8)
npt.assert_almost_equal(_scale_size(1.0), 2.8)
# Check interpolation quarter and 3-quarter points
npt.assert_almost_equal(_scale_size(0.2 * (1.0 / 4.0)), 0.3 * (1.0 / 4.0))
npt.assert_almost_equal(_scale_size(0.2 * (3.0 / 4.0)), 0.3 * (3.0 / 4.0))
npt.assert_almost_equal(_scale_size(0.2 + 0.3 * (1.0 / 4.0)), 0.3 + 0.7 * (1.0 / 4.0))
npt.assert_almost_equal(_scale_size(0.2 + 0.3 * (3.0 / 4.0)), 0.3 + 0.7 * (3.0 / 4.0))
npt.assert_almost_equal(_scale_size(0.2 + 0.3 + 0.25 * (1.0 / 4.0)), 0.3 + 0.7 + 0.8 * (1.0 / 4.0))
npt.assert_almost_equal(_scale_size(0.2 + 0.3 + 0.25 * (3.0 / 4.0)), 0.3 + 0.7 + 0.8 * (3.0 / 4.0))
npt.assert_almost_equal(_scale_size(0.2 + 0.3 + 0.25 + 0.25 * (1.0 / 4.0)), 0.3 + 0.7 + 0.8 + 1.0 * (1.0 / 4.0))
npt.assert_almost_equal(_scale_size(0.2 + 0.3 + 0.25 + 0.25 * (3.0 / 4.0)), 0.3 + 0.7 + 0.8 + 1.0 * (3.0 / 4.0))
# Test clipping
npt.assert_almost_equal(_scale_size(1.5), 2.8)
npt.assert_almost_equal(_scale_size(2.0), 2.8)
npt.assert_almost_equal(_scale_size(3.0), 2.8)
def test_allo_extent_s_eff():
sp = speaker_positions
# 1D cases => Expect _s_eff to be size_x
speaker_positions_A = sp(["M+030", "M-030"])
npt.assert_almost_equal(_s_eff(speaker_positions_A, 0.0, 0.5, 1.0), 0.0)
npt.assert_almost_equal(_s_eff(speaker_positions_A, 0.25, 0.5, 1.0), 0.25)
npt.assert_almost_equal(_s_eff(speaker_positions_A, 0.5, 0.0, 0.0), 0.5)
# Adding in the M+000 speaker should keep this as a 1D case
npt.assert_almost_equal(_s_eff(sp(["M+030", "M-030", "M+000"]), 0.5, 0.25, 0.75), 0.5)
# Just M-110 and M+110 speakers is also 1D
npt.assert_almost_equal(_s_eff(sp(["M-110", "M+110"]), 0.5, 0.25, 0.75), 0.5)
# 2D cases => Expect _s_eff to be 0.75*s1 + 0.25*s2 for s1, s2 = descend_sorted(size_x, size_y)
speaker_positions_B = sp(["M+000", "M+030", "M-030", "M+110", "M-110"])
npt.assert_almost_equal(_s_eff(speaker_positions_B, 1.0, 0.0, 0.0), 0.75 * 1.0 + 0.25 * 0.0)
npt.assert_almost_equal(_s_eff(speaker_positions_B, 0.0, 1.0, 0.0), 0.75 * 1.0 + 0.25 * 0.0)
npt.assert_almost_equal(_s_eff(speaker_positions_B, 0.0, 0.0, 1.0), 0.75 * 0.0 + 0.25 * 0.0)
npt.assert_almost_equal(_s_eff(speaker_positions_B, 1.0, 1.0, 0.0), 0.75 * 1.0 + 0.25 * 1.0)
npt.assert_almost_equal(_s_eff(speaker_positions_B, 0.5, 1.0, 0.0), 0.75 * 1.0 + 0.25 * 0.5)
# Adding M+090 and M-090 speakers is still 2D
npt.assert_almost_equal(_s_eff(sp(["M+000", "M+030", "M-030", "M+110", "M-110", "M+090", "M-090"]), 0.5, 1.0, 0.25), 0.75 * 1.0 + 0.25 * 0.5)
    # 3D cases => Expect _s_eff to be (6/9)s1 + (2/9)s2 + (1/9)s3 for s1, s2, s3 = descend_sorted(size_x, size_y, size_z)
speaker_positions_C = sp(["M+000", "M-030", "M-030", "M+110", "M-110", "U+030", "U-030"])
npt.assert_almost_equal(_s_eff(speaker_positions_C, 1.0, 0.0, 0.0 ), (6 * 1.0 + 2 * 0.0 + 1 * 0.0 ) / 9.0)
npt.assert_almost_equal(_s_eff(speaker_positions_C, 0.0, 1.0, 0.0 ), (6 * 1.0 + 2 * 0.0 + 1 * 0.0 ) / 9.0)
npt.assert_almost_equal(_s_eff(speaker_positions_C, 0.0, 0.0, 1.0 ), (6 * 1.0 + 2 * 0.0 + 1 * 0.0 ) / 9.0)
npt.assert_almost_equal(_s_eff(speaker_positions_C, 1.0, 1.0, 0.0 ), (6 * 1.0 + 2 * 1.0 + 1 * 0.0 ) / 9.0)
npt.assert_almost_equal(_s_eff(speaker_positions_C, 0.0, 1.0, 1.0 ), (6 * 1.0 + 2 * 1.0 + 1 * 0.0 ) / 9.0)
npt.assert_almost_equal(_s_eff(speaker_positions_C, 1.0, 0.0, 1.0 ), (6 * 1.0 + 2 * 1.0 + 1 * 0.0 ) / 9.0)
npt.assert_almost_equal(_s_eff(speaker_positions_C, 1.0, 1.0, 1.0 ), (6 * 1.0 + 2 * 1.0 + 1 * 1.0 ) / 9.0)
npt.assert_almost_equal(_s_eff(speaker_positions_C, 1.0, 0.5, 0.25), (6 * 1.0 + 2 * 0.5 + 1 * 0.25) / 9.0)
def test_allo_extent_p():
# p = 6 if s_eff <= 0.5
# (s_eff - 0.5)
# = 6 + ------------- * -4 if s_eff > 0.5
# (s_max - 0.5)
#
# where s_max = 2.8
npt.assert_almost_equal(_p(0.0), 6.0)
npt.assert_almost_equal(_p(0.25), 6.0)
npt.assert_almost_equal(_p(0.5), 6.0)
npt.assert_almost_equal(_p(1.0), 6.0 + ((0.5 / 2.3) * -4))
npt.assert_almost_equal(_p(2.0), 6.0 + ((1.5 / 2.3) * -4))
npt.assert_almost_equal(_p(2.8), 2.0)
def test_allo_extent_h():
# [ max(2s, 0.4)^3 ]^(1/3)
# h(c, s, d_bound) = [ -------------- ] if d_bound >= 2*s and d_bound >= 0.4
# [ 0.16 * 2s ]
# Note that 'c' isn't used, so we don't even take it as a parameter
# in our reference
def h1(s, d_bound):
return ((max(2.0*s, 0.4) ** 3.0) / (0.16 * 2.0 * s)) ** (1.0/3.0)
npt.assert_almost_equal(_h(0.0, 0.2, 0.4), h1(0.2, 0.4))
npt.assert_almost_equal(_h(1.0, 0.2, 0.5), h1(0.2, 0.5))
npt.assert_almost_equal(_h(0.5, 0.25, 0.5), h1(0.25, 0.5))
npt.assert_almost_equal(_h(0.4, 0.2, 0.6), h1(0.2, 0.6))
npt.assert_almost_equal(_h(0.3, 0.25, 0.6), h1(0.25, 0.6))
npt.assert_almost_equal(_h(0.2, 0.3, 0.6), h1(0.3, 0.6))
# [ d_bound ( d_bound )^2 ]^(1/3)
# h(c, s, d_bound) = [ ------- * ( ------- ) ] otherwise
# [ 2 ( 0.4 ) ]
def h2(s, d_bound):
return (((d_bound / 0.4) ** 2.0) * (d_bound/(2.0))) ** (1.0 / 3.0)
npt.assert_almost_equal(_h(0.0, 0.2, 0.3), h2(0.2, 0.3))
npt.assert_almost_equal(_h(0.0, 0.2, 0.2), h2(0.2, 0.2))
npt.assert_almost_equal(_h(0.0, 0.2, 0.1), h2(0.2, 0.1))
npt.assert_almost_equal(_h(0.0, 0.2, 0.0), h2(0.2, 0.0))
npt.assert_almost_equal(_h(0.0, 0.25, 0.4), h2(0.25, 0.4))
npt.assert_almost_equal(_h(0.0, 0.25, 0.3), h2(0.25, 0.3))
npt.assert_almost_equal(_h(0.0, 0.25, 0.2), h2(0.25, 0.2))
npt.assert_almost_equal(_h(0.0, 0.25, 0.1), h2(0.25, 0.1))
npt.assert_almost_equal(_h(0.0, 0.25, 0.0), h2(0.25, 0.0))
# We expect the piecemeal function to be continuous
# npt.assert_almost_equal(h1(0.4, 0.4), h2(0.4, 0.4))
# npt.assert_almost_equal(h1(0.5, 0.5), h2(0.5, 0.5))
# npt.assert_almost_equal(h1(0.6, 0.6), h2(0.6, 0.6))
# npt.assert_almost_equal(h1(0.7, 0.7), h2(0.7, 0.7))
def test_allo_extent_d_bound():
# { min(xo + 1, 1 - xo) if dim=1
# d_bound(dim, xo, yo, zo) = { min(xo + 1, 1 - xo, yo + 1, 1 - yo) if dim=2
# { min(xo + 1, 1 - xo, yo + 1, 1 - yo, zo + 1, 1 - zo) otherwise
# dim=1 cases
npt.assert_almost_equal(_d_bound(1, 0, 0, 0), 1)
npt.assert_almost_equal(_d_bound(1, 0, 1, 1), 1)
npt.assert_almost_equal(_d_bound(1, 0.5, 1, 1), 0.5)
npt.assert_almost_equal(_d_bound(1, 0.75, 1, 1), 0.25)
npt.assert_almost_equal(_d_bound(1, -0.5, -1, 1), 0.5)
npt.assert_almost_equal(_d_bound(1, -0.75, 1, 1), 0.25)
npt.assert_almost_equal(_d_bound(1, 1, 0.25, 0.5), 0.0)
npt.assert_almost_equal(_d_bound(1, -1, -0.5, 0.75), 0.0)
# dim=2 cases
npt.assert_almost_equal(_d_bound(2, 0, 0, 0), 1)
npt.assert_almost_equal(_d_bound(2, 0, 1, 1), 0)
npt.assert_almost_equal(_d_bound(2, 0.5, 0, 0), 0.5)
npt.assert_almost_equal(_d_bound(2, -0.5, 0, 0), 0.5)
npt.assert_almost_equal(_d_bound(2, 0, 0.75, 1), 0.25)
npt.assert_almost_equal(_d_bound(2, 0, -0.75, -1), 0.25)
# dim=3 cases
npt.assert_almost_equal(_d_bound(3, 0, 0, 0), 1)
npt.assert_almost_equal(_d_bound(3, 0, 0, 1), 0)
npt.assert_almost_equal(_d_bound(3, 0, 1, 0), 0)
npt.assert_almost_equal(_d_bound(3, 1, 0, 0), 0)
npt.assert_almost_equal(_d_bound(3, 0, 0, -1), 0)
npt.assert_almost_equal(_d_bound(3, 0, -1, 0), 0)
npt.assert_almost_equal(_d_bound(3, -1, 0, 0), 0)
npt.assert_almost_equal(_d_bound(3, 0, 0, 0.25), 0.75)
npt.assert_almost_equal(_d_bound(3, 0, 0.25, 0), 0.75)
npt.assert_almost_equal(_d_bound(3, 0.25, 0, 0), 0.75)
npt.assert_almost_equal(_d_bound(3, 0, 0, -0.25), 0.75)
npt.assert_almost_equal(_d_bound(3, 0, -0.25, 0), 0.75)
npt.assert_almost_equal(_d_bound(3, -0.25, 0, 0), 0.75)
npt.assert_almost_equal(_d_bound(3, 0.75, 0, 0.25), 0.25)
npt.assert_almost_equal(_d_bound(3, 0, -0.25, 0.75), 0.25)
npt.assert_almost_equal(_d_bound(3, 0.25, 0, -0.75), 0.25)
def test_allo_extent_mu():
# { h(xo, sx)^3 if dim=1
# mu(dim, sx, sy, sz, xo, yo, zo) = { h(xo, sx)h(yo, sy)^(3/2) if dim=2
# { h(xo, sx)h(yo, sy)h(zo, sz) if dim=3
for dim in range(1, 4):
# sx sy sz xo yo zo
for sx, sy, sz, xo, yo, zo in [(0.2, 0.1, 0.1, 0.0, 0.0, 0.0),
(0.2, 0.1, 0.1, 1.0, 0.0, 0.0),
(0.2, 0.1, 0.1, 0.5, 0.0, 0.0),
(0.1, 0.1, 0.1, -1.0, 0.0, 0.0),
(0.5, 1.0, 0.1, 0.0, 0.1, 0.0),
(0.1, 0.1, 0.1, 0.7, 0.8, 0.9),
(0.2, 0.3, 0.4, -1.0, -0.9, -0.5),
(0.8, 0.7, 0.6, 0.5, -0.4, 0.3),
(1.0, 1.0, 1.0, 0.0, 0.0, 0.0)]:
if dim == 1:
npt.assert_almost_equal(_mu(dim, sx, sy, sz, xo, yo, zo), _h(xo, sx, _d_bound(dim, xo, yo, zo)) ** 3)
elif dim == 2:
db = _d_bound(dim, xo, yo, zo)
npt.assert_almost_equal(_mu(dim, sx, sy, sz, xo, yo, zo), (_h(xo, sx, db) * _h(yo, sy, db)) ** (1.5))
else:
db = _d_bound(dim, xo, yo, zo)
npt.assert_almost_equal(_mu(dim, sx, sy, sz, xo, yo, zo), _h(xo, sx, db) * _h(yo, sy, db) * _h(zo, sz, db))
def test_allo_extent_calc_w():
# w(xo, yo, zo, sx, sy, sz, xs, ys, zs) = [w(xs, xo, sx), w(ys, yo, sy), w(zs, zo, sz)]
# [ 3 ( xs - xo ) ]^4
# w(xs, xo, sx) = 10^-[ --- (---------) ]
# [ 2 ( 2*sx ) ]
#
# definitions of w(ys, yo, sy) is just like the
# definition of w(xs, xo, sx), just with x replaced by y/z)
#
# [ 3 ( zs - zo ) ]^4
# w(zs, zo, sz) = 10^-[ --- (---------) ]
# [ 2 ( sz ) ]
NEG130DB_LIM = 2.0*np.power(6.5,0.25)/3.0
def wxy(_s, _o, s_):
return 10.0 ** -((1.5 * np.minimum(np.abs(_s - _o) / (2*s_), np.ones(_s.shape)*NEG130DB_LIM)) ** 4.0)
def wz(_s, _o, s_):
return 10.0 ** -((1.5 * np.minimum(np.abs(_s - _o) / s_, np.ones(_s.shape)*NEG130DB_LIM)) ** 4.0)*np.cos(zs*np.pi/(7.0/3.0))
for xo, yo, zo, sx, sy, sz, xs, ys, zs in [
( 0.0, 0.0, 0.0, 0.1, 0.1, 0.1, np.linspace(-1.0,1.0,5), np.linspace(-1.0,1.0,5), np.linspace(-1.0,1.0,5)),
( 0.1, 0.1, 0.1, 0.2, 0.2, 0.2, np.linspace(-1.0,1.0,5), np.linspace(-1.0,1.0,5), np.linspace(-1.0,1.0,5)),
(-0.1, 0.8, -0.4, 0.9, 0.2, 0.4, np.linspace(-1.0,1.0,5), np.linspace(-1.0,1.0,5), np.linspace(-1.0,1.0,5)),
( 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, np.linspace(-1.0,1.0,5), np.linspace(-1.0,1.0,5), np.linspace(-1.0,1.0,5)),
( 0.0, 0.0, 0.0, 0.2, 0.2, 0.2, np.linspace(-1.0,1.0,5), np.linspace(-1.0,1.0,5), np.linspace(-1.0,1.0,5))]:
npt.assert_almost_equal(_calc_w(xo, yo, zo, sx, sy, sz, xs, ys, zs), np.array([wxy(xs, xo, sx), wxy(ys, yo, sy), wz(zs, zo, sz)]))
def test_allo_extent_dim():
sp = speaker_positions
assert _dim(sp(["M+000"])) == 0
assert _dim(sp(["M+030", "M-030", "M+000"])) == 1
assert _dim(sp(["M+110", "M-110"])) == 1
assert _dim(sp(["U+030", "U-030"])) == 1
assert _dim(sp(["B+045", "B-045", "B+000"])) == 1
assert _dim(sp(["M+030", "M-030", "M+110", "M-110"])) == 2
assert _dim(sp(["M+030", "M-030", "M+000", "M+110", "M-110"])) == 2
assert _dim(sp(["M+030", "M-030", "M+000", "M-110", "M+090", "M-090"])) == 2
assert _dim(sp(["U+030", "U-030", "U+090", "U-090"])) == 2
assert _dim(sp(["U+030", "U-030", "U+090", "U-090", "U+180"])) == 2
assert _dim(sp(["M+030", "M-030", "M+110", "M-110", "U+030", "U-030"])) == 3
assert _dim(sp(["M+030", "M-030", "M+000", "M+110", "M-110", "U+030", "U-030"])) == 3
assert _dim(sp(["M+030", "M-030", "M+000", "M+110", "M-110", "M+090", "M-090", "U+030", "U-030"])) == 3
assert _dim(sp(["U+030", "U-030", "U+090", "U-090", "B+045", "B-045", "B+000"])) == 3
def test_allo_extent_calc_f():
# def _calc_f(p, w, g_point):
# ===
# \ p
# f = / [g_point(ss) * w(ss)]
# ===
# ss
V = lambda l: np.array(l).reshape([1, len(l)])
for p, w, g_point, expect in [
(1.0, V([1.0 ]), V([1.0 ]), 1.0), # (1*1)^1
(1.0, V([0.0 ]), V([1.0 ]), 0.0), # (0*1)^1
(0.5, V([2.0 ]), V([2.0 ]), 2.0), # (2*2)^0.5
(1.0, V([2.0 ]), V([2.0 ]), 4.0), # (2*2)^1
(1.0, V([1.0, 1.0]), V([1.0, 1.0]), 2.0), # (1*1)^1 + (1*1)^1
(2.0, V([1.0, 1.0]), V([1.0, 1.0]), 2.0), # (1*1)^2 + (1*1)^2
(0.5, V([3.0, 5.0]), V([3.0, 5.0]), 8.0)]: # (3*3)^0.5 + (5*5)^0.5
npt.assert_almost_equal(_calc_f(p, w, g_point), expect)
def test_allo_extent_find_plane_z():
sp = speaker_positions
# We could have a bit of tolerance in these tests for the None
# cases. Things should behave just fine if instead of returning None,
# the _find_plane_z returned the same number twice, or flipped the
# order of the number and the None.
assert _find_plane_z( 0.0, sp(["M+030"])) == (0.0, 0.0)
assert _find_plane_z( 0.5, sp(["M+030"])) == (0.0, None)
assert _find_plane_z(-0.5, sp(["M+030"])) == (None, 0.0)
assert _find_plane_z( 0.0, sp(["M+030", "U+030"])) == (0.0, 0.0)
assert _find_plane_z( 0.5, sp(["M+030", "U+030"])) == (0.0, 1.0)
assert _find_plane_z( 1.0, sp(["M+030", "U+030"])) == (1.0, 1.0)
assert _find_plane_z(-0.5, sp(["M+030", "U+030"])) == (None, 0.0)
assert _find_plane_z( 0.0, sp(["M+030", "U+030", "B+000"])) == (0.0, 0.0)
assert _find_plane_z( 0.5, sp(["M+030", "U+030", "B+000"])) == (0.0, 1.0)
assert _find_plane_z( 1.0, sp(["M+030", "U+030", "B+000"])) == (1.0, 1.0)
assert _find_plane_z(-0.5, sp(["M+030", "U+030", "B+000"])) == (-1.0, 0.0)
assert _find_plane_z(-1.0, sp(["M+030", "U+030", "B+000"])) == (-1.0, -1.0)
def test_allo_extent_find_row_y():
sp = speaker_positions
    # Like with test_allo_extent_find_plane_z(), our pass/fail criterion
    # is a little bit strict.
assert _find_row_y( 0.0, 0.0, sp(["M+030"])) == (None, 1.0)
assert _find_row_y( 0.0, 0.0, sp(["M+030", "M-030"])) == (None, 1.0)
assert _find_row_y( 0.0, 0.0, sp(["M+030", "M-030", "U+000", "U+110", "U-110"])) == (None, 1.0)
assert _find_row_y( 0.5, 0.0, sp(["M+030"])) == (None, 1.0)
assert _find_row_y( 0.5, 0.0, sp(["M+030", "M-030"])) == (None, 1.0)
assert _find_row_y( 0.5, 0.0, sp(["M+030", "M-030", "U+000", "U+110", "U-110"])) == (None, 1.0)
assert _find_row_y( 0.5, 1.0, sp(["M+030", "M-030", "U+000", "U+110", "U-110"])) == (-1.0, 1.0)
assert _find_row_y( 1.0, 1.0, sp(["M+030", "M-030", "U+000", "U+110", "U-110"])) == (1.0, 1.0)
assert _find_row_y( 1.0, 1.0, sp(["M+030", "M-030", "U+000", "U+110", "U-110", "B+000", "B+045", "B-045"])) == (1.0, 1.0)
assert _find_row_y( 1.0, -1.0, sp(["M+030", "M-030", "U+000", "U+110", "U-110", "B+000", "B+045", "B-045"])) == (1.0, 1.0)
assert _find_row_y( 1.0, 0.0, sp(["M+110", "M-110", "U+000", "U+110", "U-110", "B+000", "B+045", "B-045"])) == (-1.0, None)
assert _find_row_y(-1.0, 0.0, sp(["M+110", "M-110", "U+000", "U+110", "U-110", "B+000", "B+045", "B-045"])) == (-1.0, -1.0)
assert _find_row_y(-1.0, 0.0, sp(["M+030", "M-030", "M+110", "M-110", "U+000", "U+110", "U-110", "B+000", "B+045", "B-045"])) == (-1.0, -1.0)
def test_allo_extent_find_column_x():
sp = speaker_positions
    # Like with test_allo_extent_find_plane_z(), our pass/fail criterion
    # is a little bit strict.
assert _find_column_x( 0.0, 1.0, 0.0, sp(["M+000", "M+030", "M-030"])) == (0.0, 0.0)
assert _find_column_x(-0.5, 1.0, 0.0, sp(["M+000", "M+030", "M-030"])) == (-1.0, 0.0)
assert _find_column_x( 0.5, 1.0, 0.0, sp(["M+000", "M+030", "M-030"])) == ( 0.0, 1.0)
assert _find_column_x(-1.0, 1.0, 0.0, sp(["M+000", "M+030", "M-030"])) == (-1.0, -1.0)
assert _find_column_x( 1.0, 1.0, 0.0, sp(["M+000", "M+030", "M-030"])) == ( 1.0, 1.0)
assert _find_column_x( 0.0, 1.0, 0.0, sp(["M+030", "M-030", "B-045", "B+000", "B+045"])) == (-1.0, 1.0)
assert _find_column_x( 0.0, 1.0, -1.0, sp(["M+030", "M-030", "B-045", "B+000", "B+045"])) == (0.0, 0.0)
assert _find_column_x( 0.0, -1.0, 0.0, sp(["M+030", "M-030", "M+000", "M+110", "M-110"])) == (-1.0, 1.0)
assert _find_column_x( 0.0, 1.0, 0.0, sp(["M+030", "M-030", "M+000", "M+110", "M-110"])) == (0.0, 0.0)
def test_allo_extent_calc_g_point_separated():
sp = speaker_positions
# Gains for 0+2+0 case with object halfway between the speakers are
# easy to calculate in your head
# (M+030)<---1.0--->(OBJ)<---1.0--->(M-030)
# We expect 2^-0.5 for both gains since we are doing a power preserving pan
xs = [0.0]
ys = [-1.0, -0.5, 0.0, 0.5, 1.0]
zs = [-1.0, -0.5, 0.0, 0.5, 1.0]
spks = sp(["M+030", "M-030"])
gx, gy, gz = _calc_g_point_separated(spks, xs, ys, zs)
npt.assert_almost_equal(gx, np.array([[2.0 ** -0.5], [2.0 ** -0.5]]))
npt.assert_almost_equal(gy, np.array([[1.0] * len(ys)] * len(spks)))
npt.assert_almost_equal(gz, np.array([[1.0] * len(zs)] * len(spks)))
# (M+030)<---1.5--->(OBJ)<---0.5--->(M-030)
# x-gains should be sin(1.5/2 * (PI/2)) and cos(1.5/2 * (PI/2))
xs = [0.5]
gx, gy, gz = _calc_g_point_separated(spks, xs, ys, zs)
gx_M_030 = np.cos(1.5/2 * np.pi/2)
gx_MN030 = np.sin(1.5/2 * np.pi/2)
npt.assert_almost_equal(gx, np.array([[gx_M_030], [gx_MN030]]))
assert gx_M_030 < gx_MN030 # we expect the gain for the M-030 speaker to be bigger (make sure we didn't flip cos/sin...)
# (M+030)<---1.5--->(OBJ)<---0.5--->(M+110)
spks = sp(["M+030", "M+110"])
xs = [-1.0, -0.5, 0.0, 0.5, 1.0]
ys = [-0.5]
zs = [-1.0, -0.5, 0.0, 0.5, 1.0]
gx, gy, gz = _calc_g_point_separated(spks, xs, ys, zs)
gy_M_110 = np.sin(1.5/2 * np.pi/2)
gy_M_030 = np.cos(1.5/2 * np.pi/2)
npt.assert_almost_equal(gx, np.array([[1.0] * len(xs)] * len(spks)))
npt.assert_almost_equal(gy, np.array([[gy_M_030], [gy_M_110]]))
npt.assert_almost_equal(gz, np.array([[1.0] * len(zs)] * len(spks)))
assert gy_M_030 < gy_M_110
# (B+000)<---1.5--->(OBJ)<---0.5--->(U+000)
spks = sp(["B+000", "U+000"])
xs = [-1.0, -0.5, 0.0, 0.5, 1.0]
ys = [-1.0, -0.5, 0.0, 0.5, 1.0]
zs = [0.5]
gx, gy, gz = _calc_g_point_separated(spks, xs, ys, zs)
gz_B_000 = np.cos(1.5/2 * np.pi/2)
gz_U_000 = np.sin(1.5/2 * np.pi/2)
npt.assert_almost_equal(gx, np.array([[1.0] * len(xs)] * len(spks)))
npt.assert_almost_equal(gy, np.array([[1.0] * len(ys)] * len(spks)))
npt.assert_almost_equal(gz, np.array([[gz_B_000], [gz_U_000]]))
assert gz_B_000 < gz_U_000
def test_allo_extent_calc_Nz():
assert _calc_Nz(speaker_positions(["M+000"])) == 20
assert _calc_Nz(speaker_positions(["M+000", "U+000"])) == 20
assert _calc_Nz(speaker_positions(["M+000", "U+000", "B+000"])) == 40
assert _calc_Nz(speaker_positions(["M+000", "M+030", "M-030"])) == 20
assert _calc_Nz(speaker_positions(["M+000", "U+000", "M+030", "M+110", "U+030"])) == 20
assert _calc_Nz(speaker_positions(["M+000", "U+000", "B+000", "M-030", "M-110", "U-030", "B+045", "B-045"])) == 40
def get_gains_check(speakers, position, sx, sy, sz):
"""call get_gains, and check that the return is normalised and positive"""
g = get_gains(speakers, position, sx, sy, sz)
assert np.all(g >= 0)
npt.assert_allclose(np.linalg.norm(g), 1)
return g
def test_allo_extent_get_gains():
sp = speaker_positions
# sizes in y/z directions don't do anything for a 0+2+0 layout
spks = sp(["M+030", "M-030"])
g1 = get_gains_check(spks, np.array([0, 0, 0]), 1.0, 0.0, 0.0)
g2 = get_gains_check(spks, np.array([0, 0, 0]), 1.0, 1.0, 1.0)
    npt.assert_almost_equal(g1, g2)
#!/usr/bin/env python
from math import pi
from uncertainties import unumpy, umath, UFloat
import numpy as np
# local modules
from .com import total_com
def combine_bike_rider(bicyclePar, riderPar):
"""
Combines the inertia of the bicycle frame with the
inertia of a rider.
Parameters
----------
bicyclePar : dictionary
The benchmark parameter set of a bicycle.
riderPar : dictionary
The rider's mass, center of mass, and inertia expressed in the
benchmark bicycle reference frame.
Returns
-------
bicyclePar : dictionary
The benchmark bicycle parameters with a rigid rider added to the
bicycle frame.
"""
# list the masses of the rider and bicycle
masses = np.array([riderPar['mB'], bicyclePar['mB']])
# list the centers of mass of the rider and bicycle
coordinates = np.array([[riderPar['xB'], bicyclePar['xB']],
[riderPar['yB'], 0.],
[riderPar['zB'], bicyclePar['zB']]])
# calculate the new mass and center of mass
mT, cT = total_com(coordinates, masses)
# get inertia tensors for the bicycle and rider
IRider = part_inertia_tensor(riderPar, 'B')
IBicycle = part_inertia_tensor(bicyclePar, 'B')
# calculate the distance from the center of mass of each body to the
# center of mass of the combined body
dRider = np.array([riderPar['xB'] - cT[0],
riderPar['yB'] - cT[1],
riderPar['zB'] - cT[2]])
dBicycle = np.array([bicyclePar['xB'] - cT[0],
0.,
bicyclePar['zB'] - cT[2]])
# calculate the total inertia about the total body center of mass
I = (parallel_axis(IRider, riderPar['mB'], dRider) +
parallel_axis(IBicycle, bicyclePar['mB'], dBicycle))
# assign new inertia back to bike
bicyclePar['xB'] = cT[0]
bicyclePar['zB'] = cT[2]
bicyclePar['yB'] = 0.0
bicyclePar['mB'] = mT
bicyclePar['IBxx'] = I[0, 0]
bicyclePar['IBxz'] = I[0, 2]
bicyclePar['IByy'] = I[1, 1]
bicyclePar['IBzz'] = I[2, 2]
return bicyclePar
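def _demo_combine_bike_rider():
    """Hedged usage sketch with made-up numbers, only to show the expected
    dictionary keys; real values come from measured parameter sets, and
    part_inertia_tensor is assumed to be available from this module."""
    bicyclePar = {'mB': 10.0, 'xB': 0.3, 'zB': -0.9,
                  'IBxx': 0.5, 'IBxz': -0.1, 'IByy': 0.8, 'IBzz': 0.6}
    riderPar = {'mB': 80.0, 'xB': 0.3, 'yB': 0.0, 'zB': -1.1,
                'IBxx': 7.9, 'IBxz': -1.9, 'IByy': 8.0, 'IBzz': 2.8}
    return combine_bike_rider(bicyclePar, riderPar)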
def compound_pendulum_inertia(m, g, l, T):
'''Returns the moment of inertia for an object hung as a compound
pendulum.
Parameters
----------
m : float
Mass of the pendulum.
g : float
Acceration due to gravity.
l : float
Length of the pendulum.
T : float
The period of oscillation.
Returns
-------
I : float
        Moment of inertia of the pendulum.
'''
I = (T / 2. / pi)**2. * m * g * l - m * l**2.
return I
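def _demo_compound_pendulum_inertia():
    """Hedged numeric check: m=1.0 kg, g=9.81 m/s^2, l=0.3 m, T=1.2 s gives
    I = (1.2 / (2 * pi))**2 * 9.81 * 0.3 - 0.3**2 ~= 0.0173 kg*m^2."""
    return compound_pendulum_inertia(1.0, 9.81, 0.3, 1.2)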
def inertia_components(jay, beta):
'''Returns the 2D orthogonal inertia tensor.
    When at least three moments of inertia and their axis orientations are
    known relative to a common inertial frame of a planar object, the
    orthogonal moments of inertia relative to the frame are computed.
Parameters
----------
jay : ndarray, shape(n,)
An array of at least three moments of inertia. (n >= 3)
beta : ndarray, shape(n,)
An array of orientation angles corresponding to the moments of inertia
in jay.
Returns
-------
eye : ndarray, shape(3,)
Ixx, Ixz, Izz
'''
sb = unumpy.sin(beta)
cb = unumpy.cos(beta)
betaMat = unumpy.matrix(np.vstack((cb**2, -2 * sb * cb, sb**2)).T)
eye = np.squeeze(np.asarray(np.dot(betaMat.I, jay)))
return eye
def parallel_axis(Ic, m, d):
'''Returns the moment of inertia of a body about a different point.
Parameters
----------
Ic : ndarray, shape(3,3)
The moment of inertia about the center of mass of the body with respect
to an orthogonal coordinate system.
m : float
The mass of the body.
d : ndarray, shape(3,)
The distances along the three ordinates that located the new point
relative to the center of mass of the body.
Returns
-------
I : ndarray, shape(3,3)
The moment of inertia about of the body about a point located by d.
'''
a = d[0]
b = d[1]
c = d[2]
dMat = np.zeros((3, 3), dtype=object)
    dMat[0] = np.array([b**2 + c**2, -a * b, -a * c])
    dMat[1] = np.array([-a * b, c**2 + a**2, -b * c])
    dMat[2] = np.array([-a * c, -b * c, a**2 + b**2])
    return Ic + m * dMat
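def _demo_parallel_axis():
    """Hedged numeric check: moving the reference point 0.5 m along x for a
    2 kg body adds m*d**2 = 0.5 to Iyy and Izz and leaves Ixx unchanged."""
    Ic = np.eye(3)
    return parallel_axis(Ic, 2.0, np.array([0.5, 0.0, 0.0]))  # diag(1, 1.5, 1.5)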
"""Test functions.
This module implements several known mathematical functions, that can
be used to test RBFOpt.
Licensed under Revised BSD license, see LICENSE.
(C) Copyright Singapore University of Technology and Design 2014.
(C) Copyright International Business Machines Corporation 2017.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import sys
import math
import numpy as np
from rbfopt.rbfopt_black_box import RbfoptBlackBox
class branin:
"""
Branin function of the Dixon-Szego test set.
"""
@classmethod
def evaluate(cls, x):
assert(len(x)==2)
value = ((x[1] - (5.1/(4*math.pi*math.pi))*x[0]*x[0] +
5/math.pi*x[0] - 6)**2 + 10*(1-1/(8*math.pi)) *
math.cos(x[0]) +10)
return(value)
dimension = 2
var_lower = np.array([-5, 0])
    var_upper = np.array([10, 15])
# OpenSeesPy visualization module
# Author: <NAME>
# Faculty of Civil Engineering and Architecture
# Opole University of Technology, Poland
# ver. 0.94, 2020 August
# License: MIT
# Notes:
# 1. matplotlib's plt.axis('equal') does not work for 3d plots
# therefore right angles are not guaranteed to be 90 degrees on the
# plots
import openseespy.opensees as ops # installed from pip
# import opensees as ops # local compilation
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from matplotlib.patches import Circle, Polygon
from matplotlib.animation import FuncAnimation
import matplotlib.tri as tri
# default settings
# fmt: format string setting color, marker and linestyle
# check documentation on matplotlib's plot
# continuous interpolated shape line
fmt_interp = 'b-' # blue solid line, no markers
# element end nodes
fmt_nodes = 'rs' # red square markers, no line
# undeformed model
fmt_undefo = 'g--' # green dashed line, no markers
# section forces
fmt_secforce = 'b-' # blue solid line
# figure left right bottom top offsets
fig_lbrt = (.04, .04, .96, .96)
# azimuth and elevation in degrees
az_el = (-60., 30.)
# figure width and height in centimeters
fig_wi_he = (16., 10.)
def _plot_model_2d(node_labels, element_labels, offset_nd_label, axis_off):
max_x_crd, max_y_crd, max_crd = -np.inf, -np.inf, -np.inf
node_tags = ops.getNodeTags()
ele_tags = ops.getEleTags()
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# truss and beam/frame elements
if nen == 2:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
max_crd = np.amax([max_x_crd, max_y_crd])
_offset = 0.005 * max_crd
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0], ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1], ops.nodeCoord(nd2)[1]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
plt.plot(ex, ey, 'bo-')
if element_labels:
if ex[1]-ex[0] == 0:
va = 'center'
ha = 'left'
offset_x, offset_y = _offset, 0.0
elif ey[1]-ey[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y = 0.0, _offset
else:
va = 'bottom'
ha = 'left'
offset_x, offset_y = 0.03, 0.03
plt.text(xt+offset_x, yt+offset_y, f'{ele_tag}', va=va, ha=ha,
color='red')
if node_labels:
for node_tag in node_tags:
if not offset_nd_label == 'above':
offset_nd_label_x, offset_nd_label_y = _offset, _offset
va = 'bottom'
ha = 'left'
else:
offset_nd_label_x, offset_nd_label_y = 0.0, _offset
va = 'bottom'
ha = 'center'
plt.text(ops.nodeCoord(node_tag)[0]+offset_nd_label_x,
ops.nodeCoord(node_tag)[1]+offset_nd_label_y,
f'{node_tag}', va=va, ha=ha, color='blue')
# plt.axis('equal')
# 2d triangular (tri31) elements
elif nen == 3:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
max_crd = np.amax([max_x_crd, max_y_crd])
_offset = 0.005 * max_crd
_offnl = 0.003 * max_crd
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]), 'bo-')
if element_labels:
va = 'center'
ha = 'center'
plt.text(xt, yt, f'{ele_tag}', va=va, ha=ha, color='red')
if node_labels:
for node_tag in node_tags:
if not offset_nd_label == 'above':
offset_nd_label_x, offset_nd_label_y = _offnl, _offnl
va = 'bottom'
# va = 'center'
ha = 'left'
else:
offset_nd_label_x, offset_nd_label_y = 0.0, _offnl
va = 'bottom'
ha = 'center'
plt.text(ops.nodeCoord(node_tag)[0]+offset_nd_label_x,
ops.nodeCoord(node_tag)[1]+offset_nd_label_y,
f'{node_tag}', va=va, ha=ha, color='blue')
# 2d quadrilateral (quad) elements
elif nen == 4:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
max_crd = np.amax([max_x_crd, max_y_crd])
_offset = 0.005 * max_crd
_offnl = 0.003 * max_crd
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
# plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]), 'bo-')
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]), 'b-', lw=0.4)
if element_labels:
va = 'center'
ha = 'center'
plt.text(xt, yt, f'{ele_tag}', va=va, ha=ha, color='red')
if node_labels:
for node_tag in node_tags:
if not offset_nd_label == 'above':
offset_nd_label_x, offset_nd_label_y = _offnl, _offnl
va = 'bottom'
# va = 'center'
ha = 'left'
else:
offset_nd_label_x, offset_nd_label_y = 0.0, _offnl
va = 'bottom'
ha = 'center'
plt.text(ops.nodeCoord(node_tag)[0]+offset_nd_label_x,
ops.nodeCoord(node_tag)[1]+offset_nd_label_y,
f'{node_tag}', va=va, ha=ha, color='blue')
plt.axis('equal')
def _plot_model_3d(node_labels, element_labels, offset_nd_label, axis_off,
az_el, fig_wi_he, fig_lbrt):
node_tags = ops.getNodeTags()
ele_tags = ops.getEleTags()
azim, elev = az_el
fig_wi, fig_he = fig_wi_he
fleft, fbottom, fright, ftop = fig_lbrt
fig = plt.figure(figsize=(fig_wi/2.54, fig_he/2.54))
    fig.subplots_adjust(left=fleft, bottom=fbottom, right=fright, top=ftop)
ax = fig.add_subplot(111, projection=Axes3D.name)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(azim=azim, elev=elev)
max_x_crd, max_y_crd, max_z_crd, max_crd = -np.inf, -np.inf, \
-np.inf, -np.inf
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# truss and beam/frame elements
if nen == 2:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
z_crd = ops.nodeCoord(node_tag)[2]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
if z_crd > max_z_crd:
max_z_crd = z_crd
if offset_nd_label == 0 or offset_nd_label == 0.:
_offset = 0.
else:
max_crd = np.amax([max_x_crd, max_y_crd, max_z_crd])
_offset = 0.005 * max_crd
# # work-around fix because of aspect equal bug
# _max_overall = 1.1*max_crd
# _min_overall = -0.1*max_crd
# ax.set_xlim(_min_overall, _max_overall)
# ax.set_ylim(_min_overall, _max_overall)
# ax.set_zlim(_min_overall, _max_overall)
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0], ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1], ops.nodeCoord(nd2)[1]])
ez = np.array([ops.nodeCoord(nd1)[2], ops.nodeCoord(nd2)[2]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
zt = sum(ez)/nen
ax.plot(ex, ey, ez, 'bo-')
# fixme: placement of node_tag labels
if element_labels:
if ex[1]-ex[0] == 0:
va = 'center'
ha = 'left'
offset_x, offset_y, offset_z = _offset, 0.0, 0.0
elif ey[1]-ey[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, _offset, 0.0
elif ez[1]-ez[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, 0.0, _offset
else:
va = 'bottom'
ha = 'left'
offset_x, offset_y, offset_z = 0.03, 0.03, 0.03
ax.text(xt+offset_x, yt+offset_y, zt+offset_z, f'{ele_tag}',
va=va, ha=ha, color='red')
if node_labels:
for node_tag in node_tags:
ax.text(ops.nodeCoord(node_tag)[0]+_offset,
ops.nodeCoord(node_tag)[1]+_offset,
ops.nodeCoord(node_tag)[2]+_offset,
f'{node_tag}', va='bottom', ha='left', color='blue')
# quad in 3d
elif nen == 4:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
z_crd = ops.nodeCoord(node_tag)[2]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
if z_crd > max_z_crd:
max_z_crd = z_crd
# ax.plot(np.array([x_crd]),
# np.array([y_crd]),
# np.array([z_crd]), 'ro')
max_crd = np.amax([max_x_crd, max_y_crd, max_z_crd])
_offset = 0.002 * max_crd
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2],
ops.nodeCoord(nd3)[2],
ops.nodeCoord(nd4)[2]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
zt = sum(ez)/nen
ax.plot(np.append(ex, ex[0]),
np.append(ey, ey[0]),
np.append(ez, ez[0]), 'bo-')
# fixme: placement of node_tag labels
if element_labels:
if ex[1]-ex[0] == 0:
va = 'center'
ha = 'left'
offset_x, offset_y, offset_z = _offset, 0.0, 0.0
elif ey[1]-ey[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, _offset, 0.0
elif ez[1]-ez[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, 0.0, _offset
else:
va = 'bottom'
ha = 'left'
offset_x, offset_y, offset_z = 0.03, 0.03, 0.03
ax.text(xt+offset_x, yt+offset_y, zt+offset_z, f'{ele_tag}',
va=va, ha=ha, color='red')
if node_labels:
for node_tag in node_tags:
ax.text(ops.nodeCoord(node_tag)[0]+_offset,
ops.nodeCoord(node_tag)[1]+_offset,
ops.nodeCoord(node_tag)[2]+_offset,
f'{node_tag}', va='bottom', ha='left', color='blue')
# 8-node brick, 3d model
elif nen == 8:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
z_crd = ops.nodeCoord(node_tag)[2]
if x_crd > max_x_crd:
max_x_crd = x_crd
if y_crd > max_y_crd:
max_y_crd = y_crd
if z_crd > max_z_crd:
max_z_crd = z_crd
# ax.plot(np.array([x_crd]),
# np.array([y_crd]),
# np.array([z_crd]), 'ro')
max_crd = np.amax([max_x_crd, max_y_crd, max_z_crd])
_offset = 0.005 * max_crd
# work-around fix because of aspect equal bug
_max_overall = 1.1*max_crd
_min_overall = -0.1*max_crd
ax.set_xlim(_min_overall, _max_overall)
ax.set_ylim(_min_overall, _max_overall)
ax.set_zlim(_min_overall, _max_overall)
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4, nd5, nd6, nd7, nd8 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0],
ops.nodeCoord(nd5)[0],
ops.nodeCoord(nd6)[0],
ops.nodeCoord(nd7)[0],
ops.nodeCoord(nd8)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1],
ops.nodeCoord(nd5)[1],
ops.nodeCoord(nd6)[1],
ops.nodeCoord(nd7)[1],
ops.nodeCoord(nd8)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2],
ops.nodeCoord(nd3)[2],
ops.nodeCoord(nd4)[2],
ops.nodeCoord(nd5)[2],
ops.nodeCoord(nd6)[2],
ops.nodeCoord(nd7)[2],
ops.nodeCoord(nd8)[2]])
# location of label
xt = sum(ex)/nen
yt = sum(ey)/nen
zt = sum(ez)/nen
ax.plot(np.append(ex[0:4], ex[0]),
np.append(ey[0:4], ey[0]),
np.append(ez[0:4], ez[0]), 'bo-')
ax.plot(np.append(ex[4:8], ex[4]),
np.append(ey[4:8], ey[4]),
np.append(ez[4:8], ez[4]), 'bo-')
ax.plot(np.array([ex[0], ex[4]]),
np.array([ey[0], ey[4]]),
np.array([ez[0], ez[4]]), 'bo-')
ax.plot(np.array([ex[1], ex[5]]),
np.array([ey[1], ey[5]]),
np.array([ez[1], ez[5]]), 'bo-')
ax.plot(np.array([ex[2], ex[6]]),
np.array([ey[2], ey[6]]),
np.array([ez[2], ez[6]]), 'bo-')
ax.plot(np.array([ex[3], ex[7]]),
np.array([ey[3], ey[7]]),
np.array([ez[3], ez[7]]), 'bo-')
# fixme: placement of node_tag labels
if element_labels:
if ex[1]-ex[0] == 0:
va = 'center'
ha = 'left'
offset_x, offset_y, offset_z = _offset, 0.0, 0.0
elif ey[1]-ey[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, _offset, 0.0
elif ez[1]-ez[0] == 0:
va = 'bottom'
ha = 'center'
offset_x, offset_y, offset_z = 0.0, 0.0, _offset
else:
va = 'bottom'
ha = 'left'
offset_x, offset_y, offset_z = 0.03, 0.03, 0.03
ax.text(xt+offset_x, yt+offset_y, zt+offset_z, f'{ele_tag}',
va=va, ha=ha, color='red')
if node_labels:
for node_tag in node_tags:
ax.text(ops.nodeCoord(node_tag)[0]+_offset,
ops.nodeCoord(node_tag)[1]+_offset,
ops.nodeCoord(node_tag)[2]+_offset,
f'{node_tag}', va='bottom', ha='left', color='blue')
def plot_model(node_labels=1, element_labels=1, offset_nd_label=False,
axis_off=0, az_el=az_el, fig_wi_he=fig_wi_he,
fig_lbrt=fig_lbrt):
"""Plot defined model of the structure.
Args:
node_labels (int): 1 - plot node labels, 0 - do not plot them;
(default: 1)
element_labels (int): 1 - plot element labels, 0 - do not plot
them; (default: 1)
offset_nd_label (bool): False - do not offset node labels from the
actual node location. This option can enhance visibility.
        axis_off (int): 1 - turn off axes, 0 - display axes; (default: 0)
az_el (tuple): contains azimuth and elevation for 3d plots
fig_wi_he (tuple): contains width and height of the figure
        fig_lbrt (tuple): a tuple containing left, bottom, right and top offsets
    Usage:
        ``plot_model()`` - plot the model with node and element labels.
        ``plot_model(node_labels=0, element_labels=0)`` - plot the model
            without node and element labels.
        ``plot_model(axis_off=1)`` - plot the model with the axes turned off.
        ``plot_model(fig_wi_he=(20., 14.))`` - plot the model with a custom
            figure size (used for 3d plots).
"""
# az_el - azimut, elevation used for 3d plots only
node_tags = ops.getNodeTags()
ndim = np.shape(ops.nodeCoord(node_tags[0]))[0]
if ndim == 2:
_plot_model_2d(node_labels, element_labels, offset_nd_label, axis_off)
if axis_off:
plt.axis('off')
elif ndim == 3:
_plot_model_3d(node_labels, element_labels, offset_nd_label, axis_off,
az_el, fig_wi_he, fig_lbrt)
if axis_off:
plt.axis('off')
else:
print(f'\nWarning! ndim: {ndim} not supported yet.')
# plt.show() # call this from main py file for more control
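def _demo_plot_model():
    """Hedged usage sketch (assumes openseespy is installed): build a tiny
    2D truss model and plot it with node and element labels."""
    ops.wipe()
    ops.model('basic', '-ndm', 2, '-ndf', 2)
    ops.node(1, 0.0, 0.0)
    ops.node(2, 4.0, 0.0)
    ops.node(3, 2.0, 3.0)
    ops.uniaxialMaterial('Elastic', 1, 200.0e9)
    ops.element('Truss', 1, 1, 3, 0.01, 1)
    ops.element('Truss', 2, 2, 3, 0.01, 1)
    plot_model(node_labels=1, element_labels=1)
    plt.show()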
def _plot_defo_mode_2d(modeNo, sfac, nep, unDefoFlag, fmt_undefo, interpFlag,
endDispFlag, fmt_interp, fmt_nodes):
ele_tags = ops.getEleTags()
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# truss and beam/frame elements
if nen == 2:
ndf = np.shape(ops.nodeDOFs(ops.eleNodes(ele_tags[0])[0]))[0]
# truss element
if ndf == 2:
for ele_tag in ele_tags:
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
if modeNo:
eux = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[0]])
euy = np.array([ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[1]])
else:
eux = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd2)[0]])
euy = np.array([ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[1]])
# displaced element coordinates (scaled by sfac factor)
edx = np.array([ex[0] + sfac*eux[0], ex[1] + sfac*eux[1]])
edy = np.array([ey[0] + sfac*euy[0], ey[1] + sfac*euy[1]])
if unDefoFlag:
plt.plot(ex, ey, fmt_undefo)
plt.plot(edx, edy, fmt_interp)
# beam/frame element
elif ndf == 3:
for ele_tag in ele_tags:
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd1, modeNo)[2],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[2]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd1)[2],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd2)[2]])
if unDefoFlag:
plt.plot(ex, ey, fmt_undefo)
# interpolated displacement field
if interpFlag:
xcdi, ycdi = beam_defo_interp_2d(ex, ey, ed, sfac, nep)
plt.plot(xcdi, ycdi, fmt_interp)
# translations of ends
if endDispFlag:
xdi, ydi = beam_disp_ends(ex, ey, ed, sfac)
plt.plot(xdi, ydi, fmt_nodes)
plt.axis('equal')
# plt.show() # call this from main py file for more control
# 2d triangular (tri31) elements
elif nen == 3:
for ele_tag in ele_tags:
nd1, nd2, nd3 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd3, modeNo)[0],
ops.nodeEigenvector(nd3, modeNo)[1]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1]])
if unDefoFlag:
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]),
fmt_undefo)
# xcdi, ycdi = beam_defo_interp_2d(ex, ey, ed, sfac, nep)
# xdi, ydi = beam_disp_ends(ex, ey, ed, sfac)
# # interpolated displacement field
# plt.plot(xcdi, ycdi, 'b.-')
# # translations of ends only
# plt.plot(xdi, ydi, 'ro')
# xc = [x, x[0, :]]
# yc = [x, x[0, :]]
# test it with one element
x = ex+sfac*ed[[0, 2, 4]]
y = ey+sfac*ed[[1, 3, 5]]
# x = ex+sfac*ed[[0, 2, 4, 6]]
# y = ey+sfac*ed[[1, 3, 5, 7]]
plt.plot(np.append(x, x[0]), np.append(y, y[0]), 'b.-')
plt.axis('equal')
# 2d quadrilateral (quad) elements
elif nen == 4:
for ele_tag in ele_tags:
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd3, modeNo)[0],
ops.nodeEigenvector(nd3, modeNo)[1],
ops.nodeEigenvector(nd4, modeNo)[0],
ops.nodeEigenvector(nd4, modeNo)[1]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1],
ops.nodeDisp(nd4)[0],
ops.nodeDisp(nd4)[1]])
if unDefoFlag:
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]),
fmt_undefo)
# xcdi, ycdi = beam_defo_interp_2d(ex, ey, ed, sfac, nep)
# xdi, ydi = beam_disp_ends(ex, ey, ed, sfac)
# # interpolated displacement field
# plt.plot(xcdi, ycdi, 'b.-')
# # translations of ends only
# plt.plot(xdi, ydi, 'ro')
# test it with one element
x = ex+sfac*ed[[0, 2, 4, 6]]
y = ey+sfac*ed[[1, 3, 5, 7]]
plt.plot(np.append(x, x[0]), np.append(y, y[0]), 'b.-')
plt.axis('equal')
# 2d 8-node quadratic elements
# elif nen == 8:
# x = ex+sfac*ed[:, [0, 2, 4, 6, 8, 10, 12, 14]]
# y = ex+sfac*ed[:, [1, 3, 5, 7, 9, 11, 13, 15]]
# t = -1
# n = 0
# for s in range(-1, 1.4, 0.4):
# n += 1
# ...
else:
print(f'\nWarning! Elements not supported yet. nen: {nen}; must be: 2, 3, 4, 8.') # noqa: E501
def _plot_defo_mode_3d(modeNo, sfac, nep, unDefoFlag, fmt_undefo, interpFlag,
endDispFlag, fmt_interp, fmt_nodes, az_el, fig_wi_he,
fig_lbrt):
ele_tags = ops.getEleTags()
azim, elev = az_el
fig_wi, fig_he = fig_wi_he
fleft, fbottom, fright, ftop = fig_lbrt
fig = plt.figure(figsize=(fig_wi/2.54, fig_he/2.54))
fig.subplots_adjust(left=fleft, bottom=fbottom, right=fright, top=ftop)
ax = fig.add_subplot(111, projection=Axes3D.name)
# ax.axis('equal')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(azim=azim, elev=elev)
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# plot: truss and beam/frame elements in 3d
if nen == 2:
ndf = np.shape(ops.nodeDOFs(ops.eleNodes(ele_tags[0])[0]))[0]
# plot: beam/frame element in 3d
if ndf == 6:
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd1, modeNo)[2],
ops.nodeEigenvector(nd1, modeNo)[3],
ops.nodeEigenvector(nd1, modeNo)[4],
ops.nodeEigenvector(nd1, modeNo)[5],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[2],
ops.nodeEigenvector(nd2, modeNo)[3],
ops.nodeEigenvector(nd2, modeNo)[4],
ops.nodeEigenvector(nd2, modeNo)[5]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd1)[2],
ops.nodeDisp(nd1)[3],
ops.nodeDisp(nd1)[4],
ops.nodeDisp(nd1)[5],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd2)[2],
ops.nodeDisp(nd2)[3],
ops.nodeDisp(nd2)[4],
ops.nodeDisp(nd2)[5]])
# eo = Eo[i, :]
xloc = ops.eleResponse(ele_tag, 'xlocal')
yloc = ops.eleResponse(ele_tag, 'ylocal')
zloc = ops.eleResponse(ele_tag, 'zlocal')
g = np.vstack((xloc, yloc, zloc))
if unDefoFlag:
plt.plot(ex, ey, ez, fmt_undefo)
# interpolated displacement field
if interpFlag:
xcd, ycd, zcd = beam_defo_interp_3d(ex, ey, ez, g,
ed, sfac, nep)
ax.plot(xcd, ycd, zcd, fmt_interp)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
# translations of ends
if endDispFlag:
xd, yd, zd = beam_disp_ends3d(ex, ey, ez, ed, sfac)
ax.plot(xd, yd, zd, fmt_nodes)
# # work-around fix because of aspect equal bug
# xmin, xmax = ax.get_xlim()
# ymin, ymax = ax.get_ylim()
# zmin, zmax = ax.get_zlim()
# min_overall = np.amax([np.abs(xmin), np.abs(ymin), np.abs(zmin)])
# max_overall = np.amax([np.abs(xmax), np.abs(ymax), np.abs(zmax)])
# minmax_overall = max(min_overall, max_overall)
# _max_overall = 1.1 * minmax_overall
# _min_overall = -1.1 * minmax_overall
# ax.set_xlim(_min_overall, _max_overall)
# ax.set_ylim(_min_overall, _max_overall)
# # ax.set_zlim(_min_overall, _max_overall)
# ax.set_zlim(0.0, _max_overall)
# plot: quad in 3d
elif nen == 4:
ndf = np.shape(ops.nodeDOFs(ops.eleNodes(ele_tags[0])[0]))[0]
# plot: shell in 3d
if ndf == 6:
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2],
ops.nodeCoord(nd3)[2],
ops.nodeCoord(nd4)[2]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd1, modeNo)[2],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[2],
ops.nodeEigenvector(nd3, modeNo)[0],
ops.nodeEigenvector(nd3, modeNo)[1],
ops.nodeEigenvector(nd3, modeNo)[2],
ops.nodeEigenvector(nd4, modeNo)[0],
ops.nodeEigenvector(nd4, modeNo)[1],
ops.nodeEigenvector(nd4, modeNo)[2]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd1)[2],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd2)[2],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1],
ops.nodeDisp(nd3)[2],
ops.nodeDisp(nd4)[0],
ops.nodeDisp(nd4)[1],
ops.nodeDisp(nd4)[2]])
if unDefoFlag:
ax.plot(np.append(ex, ex[0]),
np.append(ey, ey[0]),
np.append(ez, ez[0]),
fmt_undefo)
x = ex+sfac*ed[[0, 3, 6, 9]]
y = ey+sfac*ed[[1, 4, 7, 10]]
z = ez+sfac*ed[[2, 5, 8, 11]]
# ax.plot(np.append(x, x[0]),
# np.append(y, y[0]),
# np.append(z, z[0]),
# 'b.-')
# ax.axis('equal')
pts = [[x[0], y[0], z[0]],
[x[1], y[1], z[1]],
[x[2], y[2], z[2]],
[x[3], y[3], z[3]]]
verts = [[pts[0], pts[1], pts[2], pts[3]]]
ax.add_collection3d(Poly3DCollection(verts, linewidths=1,
edgecolors='k',
alpha=.25))
ax.scatter(x, y, z, s=0)
# 8-node brick, 3d model
elif nen == 8:
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4, nd5, nd6, nd7, nd8 = ops.eleNodes(ele_tag)
# element node1-node2, x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0],
ops.nodeCoord(nd5)[0],
ops.nodeCoord(nd6)[0],
ops.nodeCoord(nd7)[0],
ops.nodeCoord(nd8)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1],
ops.nodeCoord(nd5)[1],
ops.nodeCoord(nd6)[1],
ops.nodeCoord(nd7)[1],
ops.nodeCoord(nd8)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2],
ops.nodeCoord(nd3)[2],
ops.nodeCoord(nd4)[2],
ops.nodeCoord(nd5)[2],
ops.nodeCoord(nd6)[2],
ops.nodeCoord(nd7)[2],
ops.nodeCoord(nd8)[2]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd1, modeNo)[2],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[2],
ops.nodeEigenvector(nd3, modeNo)[0],
ops.nodeEigenvector(nd3, modeNo)[1],
ops.nodeEigenvector(nd3, modeNo)[2],
ops.nodeEigenvector(nd4, modeNo)[0],
ops.nodeEigenvector(nd4, modeNo)[1],
ops.nodeEigenvector(nd4, modeNo)[2],
ops.nodeEigenvector(nd5, modeNo)[0],
ops.nodeEigenvector(nd5, modeNo)[1],
ops.nodeEigenvector(nd5, modeNo)[2],
ops.nodeEigenvector(nd6, modeNo)[0],
ops.nodeEigenvector(nd6, modeNo)[1],
ops.nodeEigenvector(nd6, modeNo)[2],
ops.nodeEigenvector(nd7, modeNo)[0],
ops.nodeEigenvector(nd7, modeNo)[1],
ops.nodeEigenvector(nd7, modeNo)[2],
ops.nodeEigenvector(nd8, modeNo)[0],
ops.nodeEigenvector(nd8, modeNo)[1],
ops.nodeEigenvector(nd8, modeNo)[2]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd1)[2],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd2)[2],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1],
ops.nodeDisp(nd3)[2],
ops.nodeDisp(nd4)[0],
ops.nodeDisp(nd4)[1],
ops.nodeDisp(nd4)[2],
ops.nodeDisp(nd5)[0],
ops.nodeDisp(nd5)[1],
ops.nodeDisp(nd5)[2],
ops.nodeDisp(nd6)[0],
ops.nodeDisp(nd6)[1],
ops.nodeDisp(nd6)[2],
ops.nodeDisp(nd7)[0],
ops.nodeDisp(nd7)[1],
ops.nodeDisp(nd7)[2],
ops.nodeDisp(nd8)[0],
ops.nodeDisp(nd8)[1],
ops.nodeDisp(nd8)[2]])
if unDefoFlag:
ax.plot(np.append(ex[0:4], ex[0]),
np.append(ey[0:4], ey[0]),
np.append(ez[0:4], ez[0]), fmt_undefo)
ax.plot(np.append(ex[4:8], ex[4]),
np.append(ey[4:8], ey[4]),
np.append(ez[4:8], ez[4]), fmt_undefo)
ax.plot(np.array([ex[0], ex[4]]),
np.array([ey[0], ey[4]]),
np.array([ez[0], ez[4]]), fmt_undefo)
ax.plot(np.array([ex[1], ex[5]]),
np.array([ey[1], ey[5]]),
np.array([ez[1], ez[5]]), fmt_undefo)
ax.plot(np.array([ex[2], ex[6]]),
np.array([ey[2], ey[6]]),
np.array([ez[2], ez[6]]), fmt_undefo)
ax.plot(np.array([ex[3], ex[7]]),
np.array([ey[3], ey[7]]),
np.array([ez[3], ez[7]]), fmt_undefo)
x = ex+sfac*ed[[0, 3, 6, 9, 12, 15, 18, 21]]
y = ey+sfac*ed[[1, 4, 7, 10, 13, 16, 19, 22]]
z = ez+sfac*ed[[2, 5, 8, 11, 14, 17, 20, 23]]
ax.plot(np.append(x[:4], x[0]),
np.append(y[:4], y[0]),
np.append(z[:4], z[0]),
'b.-')
ax.plot(np.append(x[4:8], x[4]),
np.append(y[4:8], y[4]),
np.append(z[4:8], z[4]),
'b.-')
ax.plot(np.array([x[0], x[4]]),
np.array([y[0], y[4]]),
np.array([z[0], z[4]]), 'b.-')
ax.plot(np.array([x[1], x[5]]),
np.array([y[1], y[5]]),
np.array([z[1], z[5]]), 'b.-')
ax.plot(np.array([x[2], x[6]]),
np.array([y[2], y[6]]),
np.array([z[2], z[6]]), 'b.-')
ax.plot(np.array([x[3], x[7]]),
np.array([y[3], y[7]]),
np.array([z[3], z[7]]), 'b.-')
# ax.axis('equal')
# work-around fix because of aspect equal bug
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
zmin, zmax = ax.get_zlim()
min_overall = np.amax([np.abs(xmin), np.abs(ymin), np.abs(zmin)])
max_overall = np.amax([np.abs(xmax), np.abs(ymax), np.abs(zmax)])
minmax_overall = max(min_overall, max_overall)
_min_overall = -1.1 * minmax_overall
_max_overall = 1.1 * minmax_overall
ax.set_xlim(0.3*_min_overall, 0.3*_max_overall)
ax.set_ylim(0.3*_min_overall, 0.3*_max_overall)
# ax.set_zlim(_min_overall, _max_overall)
ax.set_zlim(0.0, _max_overall)
def plot_defo(sfac=False, nep=17, unDefoFlag=1, fmt_undefo=fmt_undefo,
interpFlag=1, endDispFlag=1, fmt_interp=fmt_interp,
fmt_nodes=fmt_nodes, Eo=0, az_el=az_el, fig_wi_he=fig_wi_he,
fig_lbrt=fig_lbrt):
"""Plot deformed shape of the structure.
Args:
sfac (float): scale factor to increase/decrease displacements obtained
from FE analysis. If not specified (False), sfac is automatically
calculated based on the maximum overall displacement and this
maximum displacement is plotted as 10 percent (hardcoded as
ratio = 0.1) of the maximum model dimension.
interpFlag (int): 1 - use interpolated deformation using shape
function, 0 - do not use interpolation, just show displaced element
nodes (default is 1)
nep (int): number of evaluation points for shape function interpolation
(default: 17)
Usage:
``plot_defo()`` - plot deformed shape with default parameters and
automatically calculated scale factor.
``plot_defo(interpFlag=0)`` - plot simplified deformation by joining
the displaced nodes with straight lines (no shape function
interpolation)
``plot_defo(sfac=1.5)`` - plot with specified scale factor
``plot_defo(unDefoFlag=0, endDispFlag=0)`` - plot without showing
undeformed (original) mesh and without showing markers at the
element ends.
"""
node_tags = ops.getNodeTags()
# calculate sfac
min_x, min_y, min_z = np.inf, np.inf, np.inf
max_x, max_y, max_z = -np.inf, -np.inf, -np.inf
max_ux, max_uy, max_uz = -np.inf, -np.inf, -np.inf
ratio = 0.1
ndim = np.shape(ops.nodeCoord(node_tags[0]))[0]
if ndim == 2:
if not sfac:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
ux = ops.nodeDisp(node_tag)[0]
uy = ops.nodeDisp(node_tag)[1]
min_x = min(min_x, x_crd)
min_y = min(min_y, y_crd)
max_x = max(max_x, x_crd)
max_y = max(max_y, y_crd)
max_ux = max(max_ux, np.abs(ux))
max_uy = max(max_uy, np.abs(uy))
dxmax = max_x - min_x
dymax = max_y - min_y
dlmax = max(dxmax, dymax)
edmax = max(max_ux, max_uy)
sfac = ratio * dlmax/edmax
if sfac > 1000.:
print("""\nWarning!\nsfac is quite large - perhaps try to specify \
sfac value yourself.
This usually happens when translational DOFs are too small\n\n""")
_plot_defo_mode_2d(0, sfac, nep, unDefoFlag, fmt_undefo, interpFlag,
endDispFlag, fmt_interp, fmt_nodes)
elif ndim == 3:
if not sfac:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
z_crd = ops.nodeCoord(node_tag)[2]
ux = ops.nodeDisp(node_tag)[0]
uy = ops.nodeDisp(node_tag)[1]
uz = ops.nodeDisp(node_tag)[2]
min_x = min(min_x, x_crd)
min_y = min(min_y, y_crd)
min_z = min(min_z, z_crd)
max_x = max(max_x, x_crd)
max_y = max(max_y, y_crd)
max_z = max(max_z, z_crd)
max_ux = max(max_ux, np.abs(ux))
max_uy = max(max_uy, np.abs(uy))
max_uz = max(max_uz, np.abs(uz))
dxmax = max_x - min_x
dymax = max_y - min_y
dzmax = max_z - min_z
dlmax = max(dxmax, dymax, dzmax)
edmax = max(max_ux, max_uy, max_uz)
sfac = ratio * dlmax/edmax
_plot_defo_mode_3d(0, sfac, nep, unDefoFlag, fmt_undefo, interpFlag,
endDispFlag, fmt_interp, fmt_nodes, az_el,
fig_wi_he, fig_lbrt)
else:
print(f'\nWarning! ndim: {ndim} not supported yet.')
def _anim_mode_2d(modeNo, sfac, nep, unDefoFlag, fmt_undefo, interpFlag,
endDispFlag, fmt_interp, fmt_nodes, fig_wi_he, xlim, ylim,
lw):
fig_wi, fig_he = fig_wi_he
ele_tags = ops.getEleTags()
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# truss and beam/frame elements
if nen == 2:
ndf = np.shape(ops.nodeDOFs(ops.eleNodes(ele_tags[0])[0]))[0]
# truss element
if ndf == 2:
for ele_tag in ele_tags:
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
if modeNo:
eux = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[0]])
euy = np.array([ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[1]])
else:
eux = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd2)[0]])
euy = np.array([ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[1]])
# displaced element coordinates (scaled by sfac factor)
edx = np.array([ex[0] + sfac*eux[0], ex[1] + sfac*eux[1]])
edy = np.array([ey[0] + sfac*euy[0], ey[1] + sfac*euy[1]])
if unDefoFlag:
plt.plot(ex, ey, fmt_undefo)
plt.plot(edx, edy, fmt_interp)
# beam/frame element anim eigen
elif ndf == 3:
fig, ax = plt.subplots(figsize=(fig_wi/2.54, fig_he/2.54))
ax.axis('equal')
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
nel = len(ele_tags)
Ex = np.zeros((nel, 2))
Ey = np.zeros((nel, 2))
Ed = np.zeros((nel, 6))
# time vector for one cycle (period)
n_frames = 32 + 1
t = np.linspace(0., 2*np.pi, n_frames)
lines = []
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
Ex[i, :] = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
Ey[i, :] = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
Ed[i, :] = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd1, modeNo)[2],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[2]])
lines.append(ax.plot([], [], fmt_nodes, lw=lw)[0])
def init():
for j, ele_tag in enumerate(ele_tags):
lines[j].set_data([], [])
return lines
def animate(i):
for j, ele_tag in enumerate(ele_tags):
if interpFlag:
xcdi, ycdi = beam_defo_interp_2d(Ex[j, :],
Ey[j, :],
Ed[j, :],
sfac*np.cos(t[i]),
nep)
lines[j].set_data(xcdi, ycdi)
else:
xdi, ydi = beam_disp_ends(Ex[j, :], Ey[j, :], Ed[j, :],
sfac*np.cos(t[i]))
lines[j].set_data(xdi, ydi)
# plt.plot(xcdi, ycdi, fmt_interp)
return lines
anim = FuncAnimation(fig, animate, init_func=init,
frames=n_frames, interval=50, blit=True)
# return the animation object so the caller can keep a reference to it;
# otherwise it may be garbage collected before it is displayed
return anim
# plt.axis('equal')
# plt.show() # call this from main py file for more control
# 2d triangular elements - todo
# elif nen == 3:
# x = ex+sfac*ed[:, [0, 2, 4]]
# y = ex+sfac*ed[:, [1, 3, 5]]
# xc = [x, x[0, :]]
# yc = [x, x[0, :]]
# 2d quadrilateral (quad) elements
elif nen == 4:
for ele_tag in ele_tags:
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
if modeNo:
ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
ops.nodeEigenvector(nd1, modeNo)[1],
ops.nodeEigenvector(nd2, modeNo)[0],
ops.nodeEigenvector(nd2, modeNo)[1],
ops.nodeEigenvector(nd3, modeNo)[0],
ops.nodeEigenvector(nd3, modeNo)[1],
ops.nodeEigenvector(nd4, modeNo)[0],
ops.nodeEigenvector(nd4, modeNo)[1]])
else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1],
ops.nodeDisp(nd4)[0],
ops.nodeDisp(nd4)[1]])
if unDefoFlag:
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]),
fmt_undefo)
# xcdi, ycdi = beam_defo_interp_2d(ex, ey, ed, sfac, nep)
# xdi, ydi = beam_disp_ends(ex, ey, ed, sfac)
# # interpolated displacement field
# plt.plot(xcdi, ycdi, 'b.-')
# # translations of ends only
# plt.plot(xdi, ydi, 'ro')
# test it with one element
x = ex+sfac*ed[[0, 2, 4, 6]]
y = ey+sfac*ed[[1, 3, 5, 7]]
plt.plot(np.append(x, x[0]), np.append(y, y[0]), 'b.-')
plt.axis('equal')
# 2d 8-node quadratic elements
# elif nen == 8:
# x = ex+sfac*ed[:, [0, 2, 4, 6, 8, 10, 12, 14]]
# y = ex+sfac*ed[:, [1, 3, 5, 7, 9, 11, 13, 15]]
# t = -1
# n = 0
# for s in range(-1, 1.4, 0.4):
# n += 1
# ...
else:
print(f'\nWarning! Elements not supported yet. nen: {nen}; must be: 2, 3, 4, 8.') # noqa: E501
def anim_mode(modeNo, sfac=False, nep=17, unDefoFlag=1, fmt_undefo=fmt_undefo,
interpFlag=1, endDispFlag=1, fmt_interp=fmt_interp,
fmt_nodes='b-', Eo=0, az_el=az_el, fig_wi_he=fig_wi_he,
fig_lbrt=fig_lbrt, xlim=[0, 1], ylim=[0, 1], lw=3.):
"""Make animation of a mode shape obtained from eigenvalue solution.
Args:
modeNo (int): indicates which mode shape to animate.
xlim (list): x-axis limits of the animation window (default: [0, 1])
ylim (list): y-axis limits of the animation window (default: [0, 1])
lw (float): line width used for the animated mode shape (default: 3.)
sfac (float): scale factor
nep (integer): number of evaluation points inside the element and
including both element ends
unDefoFlag (integer): 1 - plot the undeformed model (mesh), 0 - do not
plot the mesh
interpFlag (integer): 1 - interpolate deformation inside element,
0 - no interpolation
endDispFlag (integer): 1 - plot marks at element ends, 0 - no marks
fmt_interp (string): format line string for interpolated (continuous)
deformed shape. The format contains information on line color,
style and marks as in the standard matplotlib plot function.
fmt_nodes (string): format string for the marks of element ends
az_el (tuple): a tuple containing the azimuth and elevation
fig_lbrt (tuple): a tuple containing left, bottom, right and top offsets
fig_wi_he (tuple): contains width and height of the figure
Examples:
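Illustrative call (an eigen analysis is assumed to have been run
first; the mode number and axis limits below are placeholders)::
ops.eigen(3)
anim = opsv.anim_mode(1, xlim=[-1., 7.], ylim=[-1., 5.])
plt.show()  # keep the returned object alive while showing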
Notes:
See also:
"""
node_tags = ops.getNodeTags()
# calculate sfac
# min_x, min_y, min_z = np.inf, np.inf, np.inf
# max_x, max_y, max_z = -np.inf, -np.inf, -np.inf
# max_ux, max_uy, max_uz = -np.inf, -np.inf, -np.inf
min_x, min_y = np.inf, np.inf
max_x, max_y = -np.inf, -np.inf
max_ux, max_uy = -np.inf, -np.inf
ratio = 0.1
ndim = np.shape(ops.nodeCoord(node_tags[0]))[0]
if ndim == 2:
if not sfac:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
ux = ops.nodeEigenvector(node_tag, modeNo)[0]
uy = ops.nodeEigenvector(node_tag, modeNo)[1]
min_x = min(min_x, x_crd)
min_y = min(min_y, y_crd)
max_x = max(max_x, x_crd)
max_y = max(max_y, y_crd)
max_ux = max(max_ux, np.abs(ux))
max_uy = max(max_uy, np.abs(uy))
dxmax = max_x - min_x
dymax = max_y - min_y
dlmax = max(dxmax, dymax)
edmax = max(max_ux, max_uy)
sfac = ratio * dlmax/edmax
return _anim_mode_2d(modeNo, sfac, nep, unDefoFlag, fmt_undefo,
interpFlag, endDispFlag, fmt_interp, fmt_nodes,
fig_wi_he, xlim, ylim, lw)
# elif ndim == 3:
# if not sfac:
# for node_tag in node_tags:
# x_crd = ops.nodeCoord(node_tag)[0]
# y_crd = ops.nodeCoord(node_tag)[1]
# z_crd = ops.nodeCoord(node_tag)[2]
# ux = ops.nodeEigenvector(node_tag, modeNo)[0]
# uy = ops.nodeEigenvector(node_tag, modeNo)[1]
# uz = ops.nodeEigenvector(node_tag, modeNo)[2]
# min_x = min(min_x, x_crd)
# min_y = min(min_y, y_crd)
# min_z = min(min_z, z_crd)
# max_x = max(max_x, x_crd)
# max_y = max(max_y, y_crd)
# max_z = max(max_z, z_crd)
# max_ux = max(max_ux, np.abs(ux))
# max_uy = max(max_uy, np.abs(uy))
# max_uz = max(max_uz, np.abs(uz))
# dxmax = max_x - min_x
# dymax = max_y - min_y
# dzmax = max_z - min_z
# dlmax = max(dxmax, dymax, dzmax)
# edmax = max(max_ux, max_uy, max_uz)
# sfac = ratio * dlmax/edmax
# _plot_defo_mode_3d(modeNo, sfac, nep, unDefoFlag, fmt_undefo,
# interpFlag, endDispFlag, fmt_interp, fmt_nodes,
# Eo, az_el, fig_wi_he, fig_lbrt)
else:
print(f'\nWarning! ndim: {ndim} not supported yet.')
def plot_mode_shape(modeNo, sfac=False, nep=17, unDefoFlag=1,
fmt_undefo=fmt_undefo, interpFlag=1, endDispFlag=1,
fmt_interp=fmt_interp, fmt_nodes=fmt_nodes, Eo=0,
az_el=az_el, fig_wi_he=fig_wi_he, fig_lbrt=fig_lbrt):
"""Plot mode shape of the structure obtained from eigenvalue analysis.
Args:
modeNo (int): indicates which mode shape to plot
sfac (float): scale factor to increase/decrease displacements obtained
from FE analysis. If not specified (False), sfac is automatically
calculated based on the maximum overall displacement and this
maximum displacement is plotted as 10 percent (hardcoded as
ratio = 0.1) of the maximum model dimension.
interpFlag (int): 1 - use interpolated deformation using shape
function, 0 - do not use interpolation, just show displaced element
nodes (default is 1)
nep (int): number of evaluation points for shape function interpolation
(default: 17)
Usage:
``plot_mode_shape(1)`` - plot the first mode shape with default parameters
and automatically calculated scale factor.
``plot_mode_shape(2, interpFlag=0)`` - plot the 2nd mode shape by joining
the displaced nodes with straight lines (no shape function
interpolation)
``plot_mode_shape(3, sfac=1.5)`` - plot the 3rd mode shape with specified
scale factor
``plot_mode_shape(4, unDefoFlag=0, endDispFlag=0)`` - plot the 4th mode
shape without showing undeformed (original) mesh and without showing
markers at the element ends.
Examples:
Notes:
See also:
"""
node_tags = ops.getNodeTags()
# calculate sfac
min_x, min_y, min_z = np.inf, np.inf, np.inf
max_x, max_y, max_z = -np.inf, -np.inf, -np.inf
max_ux, max_uy, max_uz = -np.inf, -np.inf, -np.inf
ratio = 0.1
ndim = np.shape(ops.nodeCoord(node_tags[0]))[0]
if ndim == 2:
if not sfac:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
ux = ops.nodeEigenvector(node_tag, modeNo)[0]
uy = ops.nodeEigenvector(node_tag, modeNo)[1]
min_x = min(min_x, x_crd)
min_y = min(min_y, y_crd)
max_x = max(max_x, x_crd)
max_y = max(max_y, y_crd)
max_ux = max(max_ux, np.abs(ux))
max_uy = max(max_uy, np.abs(uy))
dxmax = max_x - min_x
dymax = max_y - min_y
dlmax = max(dxmax, dymax)
edmax = max(max_ux, max_uy)
sfac = ratio * dlmax/edmax
_plot_defo_mode_2d(modeNo, sfac, nep, unDefoFlag, fmt_undefo,
interpFlag, endDispFlag, fmt_interp, fmt_nodes)
elif ndim == 3:
if not sfac:
for node_tag in node_tags:
x_crd = ops.nodeCoord(node_tag)[0]
y_crd = ops.nodeCoord(node_tag)[1]
z_crd = ops.nodeCoord(node_tag)[2]
ux = ops.nodeEigenvector(node_tag, modeNo)[0]
uy = ops.nodeEigenvector(node_tag, modeNo)[1]
uz = ops.nodeEigenvector(node_tag, modeNo)[2]
min_x = min(min_x, x_crd)
min_y = min(min_y, y_crd)
min_z = min(min_z, z_crd)
max_x = max(max_x, x_crd)
max_y = max(max_y, y_crd)
max_z = max(max_z, z_crd)
max_ux = max(max_ux, np.abs(ux))
max_uy = max(max_uy, np.abs(uy))
max_uz = max(max_uz, np.abs(uz))
dxmax = max_x - min_x
dymax = max_y - min_y
dzmax = max_z - min_z
dlmax = max(dxmax, dymax, dzmax)
edmax = max(max_ux, max_uy, max_uz)
sfac = ratio * dlmax/edmax
_plot_defo_mode_3d(modeNo, sfac, nep, unDefoFlag, fmt_undefo,
interpFlag, endDispFlag, fmt_interp, fmt_nodes,
az_el, fig_wi_he, fig_lbrt)
else:
print(f'\nWarning! ndim: {ndim} not supported yet.')
def rot_transf_3d(ex, ey, ez, g):
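"""
Return the 12x12 transformation matrix G (a block-diagonal repetition
of the 3x3 local axes matrix g) and the length L of a 3d beam element.
"""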
Lxyz = np.array([ex[1]-ex[0], ey[1]-ey[0], ez[1]-ez[0]])
L = np.sqrt(Lxyz @ Lxyz)
z = np.zeros((3, 3))
G = np.block([[g, z, z, z],
[z, g, z, z],
[z, z, g, z],
[z, z, z, g]])
return G, L
def beam_defo_interp_2d(ex, ey, u, sfac, nep=17):
"""
Interpolate element displacements at nep points.
Parameters:
ex, ey : element x, y coordinates,
u : element nodal displacements
sfac : scale factor for deformation plot
nep : number of evaluation points (including end nodes)
Returns:
crd_xc, crd_yc : x, y coordinates of interpolated (at nep points)
beam deformation required for plot_defo() function
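Example (an illustrative sketch with made-up nodal displacements for
an element running from (0, 0) to (4, 0))::
ex, ey = np.array([0., 4.]), np.array([0., 0.])
u = np.array([0., 0., 0., 0., 0.05, 0.01])  # ux1 uy1 ur1 ux2 uy2 ur2
crd_xc, crd_yc = beam_defo_interp_2d(ex, ey, u, sfac=10.)
plt.plot(crd_xc, crd_yc)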
"""
Lxy = np.array([ex[1]-ex[0], ey[1]-ey[0]])
L = np.sqrt(Lxy @ Lxy)
cosa, cosb = Lxy / L
G = np.array([[cosa, cosb, 0., 0., 0., 0.],
[-cosb, cosa, 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0.],
[0., 0., 0., cosa, cosb, 0.],
[0., 0., 0., -cosb, cosa, 0.],
[0., 0., 0., 0., 0., 1.]])
u_l = G @ u
xl = np.linspace(0., L, num=nep)
one = np.ones(xl.shape)
# longitudinal deformation (1)
N_a = np.column_stack((one - xl/L, xl/L))
u_ac = N_a @ np.array([u_l[0], u_l[3]])
# transverse deformation (2)
N_t = np.column_stack((one - 3*xl**2/L**2 + 2*xl**3/L**3,
xl - 2*xl**2/L + xl**3/L**2,
3*xl**2/L**2 - 2*xl**3/L**3,
-xl**2/L + xl**3/L**2))
u_tc = N_t @ np.array([u_l[1], u_l[2], u_l[4], u_l[5]])
# combined two row vectors
# 1-st vector longitudinal deformation (1)
# 2-nd vector transverse deformation (2)
u_atc = np.vstack((u_ac, u_tc))
# project longitudinal (u_ac) and transverse deformation
# (local u and v) to (global u and v)
G1 = np.array([[cosa, -cosb],
[cosb, cosa]])
u_xyc = G1 @ u_atc
# discretize element coordinates
# first row = X + [0, dx, 2*dx, ..., (nep-1)*dx]
# second row = Y + [0, dy, 2*dy, ..., (nep-1)*dy]
xy_c = np.vstack((np.linspace(ex[0], ex[1], num=nep),
np.linspace(ey[0], ey[1], num=nep)))
# Continuous x, y displacement coordinates
crd_xc = xy_c[0, :] + sfac * u_xyc[0, :]
crd_yc = xy_c[1, :] + sfac * u_xyc[1, :]
# latex_array(ecrd_xc)
# latex_array(ecrd_yc)
return crd_xc, crd_yc
def beam_defo_interp_3d(ex, ey, ez, g, u, sfac, nep=17):
"""
3d beam version of beam_defo_interp_2d.
"""
G, L = rot_transf_3d(ex, ey, ez, g)
ul = G @ u
_, crd_yc = beam_defo_interp_2d(np.array([0., L]),
np.array([0., 0.]),
np.array([ul[0], ul[1], ul[5], ul[6],
ul[7], ul[11]]), sfac, nep)
crd_xc, crd_zc = beam_defo_interp_2d(np.array([0., L]),
np.array([0., 0.]),
np.array([ul[0], ul[2], -ul[4], ul[6],
ul[8], -ul[10]]), sfac, nep)
xl = np.linspace(0., L, num=nep)
crd_xc = crd_xc - xl
crd_xyzc = np.vstack([crd_xc, crd_yc, crd_zc])
u_xyzc = np.transpose(g) @ crd_xyzc
xyz_c = np.vstack((np.linspace(ex[0], ex[1], num=nep),
np.linspace(ey[0], ey[1], num=nep),
np.linspace(ez[0], ez[1], num=nep)))
crd_xc = xyz_c[0, :] + u_xyzc[0, :]
crd_yc = xyz_c[1, :] + u_xyzc[1, :]
crd_zc = xyz_c[2, :] + u_xyzc[2, :]
return crd_xc, crd_yc, crd_zc
def beam_disp_ends(ex, ey, d, sfac):
"""
Calculate the element deformation at element ends only.
"""
# indx: 0 1 2 3 4 5
# Ed = ux1 uy1 ur1 ux2 uy2 ur2
exd = np.array([ex[0] + sfac*d[0], ex[1] + sfac*d[3]])
eyd = np.array([ey[0] + sfac*d[1], ey[1] + sfac*d[4]])
return exd, eyd
def beam_disp_ends3d(ex, ey, ez, d, sfac):
"""
Calculate the element deformation at element ends only.
"""
# indx: 0 1 2 3 4 5 6 7 8 9 10 11
# Ed = ux1 uy1 uz1 rx1 ry1 rz1 ux2 uy2 uz2 rx2 ry2 rz2
exd = np.array([ex[0] + sfac*d[0], ex[1] + sfac*d[6]])
eyd = np.array([ey[0] + sfac*d[1], ey[1] + sfac*d[7]])
ezd = np.array([ez[0] + sfac*d[2], ez[1] + sfac*d[8]])
return exd, eyd, ezd
# plot_fiber_section is inspired by Matlab ``plotSection.zip``
# written by <NAME> available at
# http://users.ntua.gr/divamva/software.html
def plot_fiber_section(fib_sec_list, fillflag=1,
matcolor=['y', 'b', 'r', 'g', 'm', 'k']):
"""Plot fiber cross-section.
Args:
fib_sec_list (list): list of lists in a format similar to the input
expected by the OpenSees fiber section commands (see
``fib_sec_list_to_cmds()``)
fillflag (int): 1 - filled fibers with color specified in matcolor
list, 0 - no color, only the outline of fibers
matcolor (list): sequence of colors for various material tags
assigned to fibers
Examples:
::
fib_sec_1 = [['section', 'Fiber', 1, '-GJ', 1.0e6],
['patch', 'quad', 1, 4, 1, 0.032, 0.317, -0.311, 0.067, -0.266, 0.005, 0.077, 0.254], # noqa: E501
['patch', 'quad', 1, 1, 4, -0.075, 0.144, -0.114, 0.116, 0.075, -0.144, 0.114, -0.116], # noqa: E501
['patch', 'quad', 1, 4, 1, 0.266, -0.005, -0.077, -0.254, -0.032, -0.317, 0.311, -0.067] # noqa: E501
]
opsv.fib_sec_list_to_cmds(fib_sec_1)
matcolor = ['r', 'lightgrey', 'gold', 'w', 'w', 'w']
opsv.plot_fiber_section(fib_sec_1, matcolor=matcolor)
plt.axis('equal')
# plt.savefig(f'{kateps}fibsec_rc.png')
plt.show()
Notes:
``fib_sec_list`` can be reused by means of a python helper function
``ops_vis.fib_sec_list_to_cmds(fib_sec_list_1)``
See also:
``ops_vis.fib_sec_list_to_cmds()``
"""
fig, ax = plt.subplots()
ax.set_xlabel('z')
ax.set_ylabel('y')
ax.grid(False)
for item in fib_sec_list:
if item[0] == 'layer':
matTag = item[2]
if item[1] == 'straight':
n_bars = item[3]
As = item[4]
Iy, Iz, Jy, Jz = item[5], item[6], item[7], item[8]
r = np.sqrt(As / np.pi)
Y = np.linspace(Iy, Jy, n_bars)
Z = np.linspace(Iz, Jz, n_bars)
for zi, yi in zip(Z, Y):
bar = Circle((zi, yi), r, ec='k', fc='k', zorder=10)
ax.add_patch(bar)
if item[0] == 'patch':
matTag, nIJ, nJK = item[2], item[3], item[4]
if item[1] == 'quad' or item[1] == 'quadr':
Iy, Iz, Jy, Jz = item[5], item[6], item[7], item[8]
Ky, Kz, Ly, Lz = item[9], item[10], item[11], item[12]
if item[1] == 'rect':
Iy, Iz, Ky, Kz = item[5], item[6], item[7], item[8]
Jy, Jz, Ly, Lz = Ky, Iz, Iy, Kz
# check for convexity (vector products)
outIJxIK = (Jy-Iy)*(Kz-Iz) - (Ky-Iy)*(Jz-Iz)
outIKxIL = (Ky-Iy)*(Lz-Iz) - (Ly-Iy)*(Kz-Iz)
# check if I, J, L points are collinear
outIJxIL = (Jy-Iy)*(Lz-Iz) - (Ly-Iy)*(Jz-Iz)
# outJKxJL = (Ky-Jy)*(Lz-Jz) - (Ly-Jy)*(Kz-Jz)
if outIJxIK <= 0 or outIKxIL <= 0 or outIJxIL <= 0:
print('\nWarning! Patch quad is non-convex, defined counter-clockwise, or has at least 3 collinear points') # noqa: E501
IJz, IJy = np.linspace(Iz, Jz, nIJ+1), np.linspace(Iy, Jy, nIJ+1)
JKz, JKy = np.linspace(Jz, Kz, nJK+1), np.linspace(Jy, Ky, nJK+1)
LKz, LKy = np.linspace(Lz, Kz, nIJ+1), np.linspace(Ly, Ky, nIJ+1)
ILz, ILy = np.linspace(Iz, Lz, nJK+1), np.linspace(Iy, Ly, nJK+1)
if fillflag:
Z = np.zeros((nIJ+1, nJK+1))
Y = np.zeros((nIJ+1, nJK+1))
for j in range(nIJ+1):
Z[j, :] = np.linspace(IJz[j], LKz[j], nJK+1)
Y[j, :] = np.linspace(IJy[j], LKy[j], nJK+1)
for j in range(nIJ):
for k in range(nJK):
zy = np.array([[Z[j, k], Y[j, k]],
[Z[j, k+1], Y[j, k+1]],
[Z[j+1, k+1], Y[j+1, k+1]],
[Z[j+1, k], Y[j+1, k]]])
poly = Polygon(zy, closed=True, ec='k', fc=matcolor[matTag-1])
ax.add_patch(poly)
else:
# horizontal lines
for az, bz, ay, by in zip(IJz, LKz, IJy, LKy):
plt.plot([az, bz], [ay, by], 'b-', zorder=1)
# vertical lines
for az, bz, ay, by in zip(JKz, ILz, JKy, ILy):
plt.plot([az, bz], [ay, by], 'b-', zorder=1)
def fib_sec_list_to_cmds(fib_sec_list):
"""Reuses fib_sec_list to define fiber section in OpenSees.
At present it is not possible to extract fiber section data from
the OpenSees domain, so this function is a workaround. The idea is to
prepare data similar to the one the regular OpenSees commands
(``section('Fiber', ...)``, ``fiber()``, ``patch()`` and/or
``layer()``) require.
Args:
fib_sec_list (list): is a list of fiber section data. First sub-list
also defines the torsional stiffness (GJ).
Warning:
If you use this function, do not issue the regular OpenSees:
section, Fiber, Patch or Layer commands.
See also:
``ops_vis.plot_fiber_section()``
"""
for dat in fib_sec_list:
if dat[0] == 'section':
secTag, GJ = dat[2], dat[4]
ops.section('Fiber', secTag, '-GJ', GJ)
if dat[0] == 'layer':
matTag = dat[2]
if dat[1] == 'straight':
n_bars = dat[3]
As = dat[4]
Iy, Iz, Jy, Jz = dat[5], dat[6], dat[7], dat[8]
ops.layer('straight', matTag, n_bars, As, Iy, Iz, Jy, Jz)
if dat[0] == 'patch':
matTag = dat[2]
nIJ = dat[3]
nJK = dat[4]
if dat[1] == 'quad' or dat[1] == 'quadr':
Iy, Iz, Jy, Jz = dat[5], dat[6], dat[7], dat[8]
Ky, Kz, Ly, Lz = dat[9], dat[10], dat[11], dat[12]
ops.patch('quad', matTag, nIJ, nJK, Iy, Iz, Jy, Jz, Ky, Kz,
Ly, Lz)
if dat[1] == 'rect':
Iy, Iz, Ky, Kz = dat[5], dat[6], dat[7], dat[8]
Jy, Jz, Ly, Lz = Ky, Iz, Iy, Kz
ops.patch('rect', matTag, nIJ, nJK, Iy, Iz, Ky, Kz)
def _anim_defo_2d(Eds, timeV, sfac, nep, unDefoFlag, fmt_undefo,
interpFlag, endDispFlag, fmt_interp, fmt_nodes, fig_wi_he,
xlim, ylim):
fig_wi, fig_he = fig_wi_he
ele_tags = ops.getEleTags()
nen = np.shape(ops.eleNodes(ele_tags[0]))[0]
# truss and beam/frame elements
if nen == 2:
ndf = np.shape(ops.nodeDOFs(ops.eleNodes(ele_tags[0])[0]))[0]
# truss element
if ndf == 2:
for ele_tag in ele_tags:
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
eux = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd2)[0]])
euy = np.array([ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[1]])
# displaced element coordinates (scaled by sfac factor)
edx = np.array([ex[0] + sfac*eux[0], ex[1] + sfac*eux[1]])
edy = np.array([ey[0] + sfac*euy[0], ey[1] + sfac*euy[1]])
if unDefoFlag:
plt.plot(ex, ey, fmt_undefo)
plt.plot(edx, edy, fmt_interp)
# beam/frame element anim defo
elif ndf == 3:
fig, ax = plt.subplots(figsize=(fig_wi/2.54, fig_he/2.54))
ax.axis('equal')
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
# ax.grid()
nel = len(ele_tags)
Ex = np.zeros((nel, 2))
Ey = np.zeros((nel, 2))
# no of frames equal to time intervals
n_frames, _, _ = np.shape(Eds)
lines = []
# time_text = ax.set_title('') # does not work
time_text = ax.text(.05, .95, '', transform=ax.transAxes)
for i, ele_tag in enumerate(ele_tags):
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
Ex[i, :] = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
Ey[i, :] = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
lines.append(ax.plot([], [], fmt_nodes, lw=3)[0])
def init():
for j, ele_tag in enumerate(ele_tags):
lines[j].set_data([], [])
time_text.set_text('')
return tuple(lines) + (time_text,)
def animate(i):
for j, ele_tag in enumerate(ele_tags):
if interpFlag:
xcdi, ycdi = beam_defo_interp_2d(Ex[j, :],
Ey[j, :],
Eds[i, j, :],
sfac,
nep)
lines[j].set_data(xcdi, ycdi)
else:
xdi, ydi = beam_disp_ends(Ex[j, :], Ey[j, :],
Eds[i, j, :], sfac)
lines[j].set_data(xdi, ydi)
# plt.plot(xcdi, ycdi, fmt_interp)
# time_text.set_text(f'f')
time_text.set_text(f'frame: {i+1}/{n_frames}, '
f'time: {timeV[i]:.3f} s')
return tuple(lines) + (time_text,)
anim = FuncAnimation(fig, animate, init_func=init, frames=n_frames,
interval=50, blit=True, repeat=False)
# return the animation object so the caller can keep a reference to it;
# otherwise it may be garbage collected before it is displayed
return anim
# plt.axis('equal')
# plt.show() # call this from main py file for more control
# 2d triangular elements
# elif nen == 3:
# x = ex+sfac*ed[:, [0, 2, 4]]
# y = ex+sfac*ed[:, [1, 3, 5]]
# xc = [x, x[0, :]]
# yc = [x, x[0, :]]
# 2d quadrilateral (quad) elements
elif nen == 4:
for ele_tag in ele_tags:
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0],
ops.nodeCoord(nd3)[0],
ops.nodeCoord(nd4)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1],
ops.nodeCoord(nd3)[1],
ops.nodeCoord(nd4)[1]])
# if modeNo:
# ed = np.array([ops.nodeEigenvector(nd1, modeNo)[0],
# ops.nodeEigenvector(nd1, modeNo)[1],
# ops.nodeEigenvector(nd2, modeNo)[0],
# ops.nodeEigenvector(nd2, modeNo)[1],
# ops.nodeEigenvector(nd3, modeNo)[0],
# ops.nodeEigenvector(nd3, modeNo)[1],
# ops.nodeEigenvector(nd4, modeNo)[0],
# ops.nodeEigenvector(nd4, modeNo)[1]])
# else:
ed = np.array([ops.nodeDisp(nd1)[0],
ops.nodeDisp(nd1)[1],
ops.nodeDisp(nd2)[0],
ops.nodeDisp(nd2)[1],
ops.nodeDisp(nd3)[0],
ops.nodeDisp(nd3)[1],
ops.nodeDisp(nd4)[0],
ops.nodeDisp(nd4)[1]])
if unDefoFlag:
plt.plot(np.append(ex, ex[0]), np.append(ey, ey[0]),
fmt_undefo)
# xcdi, ycdi = beam_defo_interp_2d(ex, ey, ed, sfac, nep)
# xdi, ydi = beam_disp_ends(ex, ey, ed, sfac)
# # interpolated displacement field
# plt.plot(xcdi, ycdi, 'b.-')
# # translations of ends only
# plt.plot(xdi, ydi, 'ro')
# test it with one element
x = ex+sfac*ed[[0, 2, 4, 6]]
y = ey+sfac*ed[[1, 3, 5, 7]]
plt.plot(np.append(x, x[0]), np.append(y, y[0]), 'b.-')
plt.axis('equal')
# 2d 8-node quadratic elements
# elif nen == 8:
# x = ex+sfac*ed[:, [0, 2, 4, 6, 8, 10, 12, 14]]
# y = ex+sfac*ed[:, [1, 3, 5, 7, 9, 11, 13, 15]]
# t = -1
# n = 0
# for s in range(-1, 1.4, 0.4):
# n += 1
# ...
else:
print(f'\nWarning! Elements not supported yet. nen: {nen}; must be: 2, 3, 4, 8.') # noqa: E501
def anim_defo(Eds, timeV, sfac, nep=17, unDefoFlag=1, fmt_undefo=fmt_undefo,
interpFlag=1, endDispFlag=1, fmt_interp=fmt_interp,
fmt_nodes='b-', az_el=az_el, fig_lbrt=fig_lbrt,
fig_wi_he=fig_wi_he, xlim=[0, 1], ylim=[0, 1]):
"""Make animation of the deformed shape computed by transient analysis
Args:
Eds (ndarray): a 3d array (n_steps x n_eles x n_dof_per_element)
containing the displacement history of the elements.
timeV (1darray): vector of discretized time values
sfac (float): scale factor
nep (integer): number of evaluation points inside the element and
including both element ends
unDefoFlag (integer): 1 - plot the undeformed model (mesh), 0 - do not
plot the mesh
interpFlag (integer): 1 - interpolate deformation inside element,
0 - no interpolation
endDispFlag (integer): 1 - plot marks at element ends, 0 - no marks
fmt_interp (string): format line string for interpolated (continuous)
deformed shape. The format contains information on line color,
style and marks as in the standard matplotlib plot function.
fmt_nodes (string): format string for the marks of element ends
az_el (tuple): a tuple containing the azimuth and elevation
fig_lbrt (tuple): a tuple containing left, bottom, right and top offsets
fig_wi_he (tuple): contains width and height of the figure
Examples:
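Illustrative call (assumes Eds and timeV were recorded during a prior
transient analysis; the scale factor and axis limits are placeholders)::
anim = opsv.anim_defo(Eds, timeV, sfac=10., xlim=[-1., 7.],
ylim=[-1., 5.])
plt.show()  # keep the returned object alive while showing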
Notes:
See also:
"""
node_tags = ops.getNodeTags()
ndim = np.shape(ops.nodeCoord(node_tags[0]))[0]
if ndim == 2:
return _anim_defo_2d(Eds, timeV, sfac, nep, unDefoFlag, fmt_undefo,
interpFlag, endDispFlag, fmt_interp, fmt_nodes,
fig_wi_he, xlim, ylim)
else:
print(f'\nWarning! ndim: {ndim} not supported yet.')
def section_force_distribution_2d(ex, ey, pl, nep=2,
ele_load_data=['-beamUniform', 0., 0.]):
"""
Calculate section forces (N, V, M) for an elastic 2D Euler-Bernoulli beam.
Input:
ex, ey - x, y element coordinates in the global system
pl - vector of element nodal forces in the local system
(from ``ops.eleResponse(ele_tag, 'localForces')``)
nep - number of evaluation points, by default (2) at element ends
ele_load_data - list of transverse and longitudinal element load
syntax: [ele_load_type, Wy, Wx]
For now only '-beamUniform' element load type is acceptable
Output:
s = [N V M]; shape: (nep,3)
section forces at nep points along local x
xl: coordinates of local x-axis; shape: (nep,)
Use it with dia_sf to draw N, V, M diagrams.
TODO: add '-beamPoint' element load type
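Example (a sketch; element tag 3 and the load values are assumptions,
pl comes from the OpenSees domain)::
ex, ey = np.array([0., 4.]), np.array([0., 0.])
pl = ops.eleResponse(3, 'localForces')
s, xl = section_force_distribution_2d(ex, ey, pl, nep=17,
ele_load_data=['-beamUniform', -10.e3, 0.])
M = s[:, 2]  # bending moments at the evaluation points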
"""
# eload_type, Wy, Wx = ele_load_data[0], ele_load_data[1], ele_load_data[2]
Wy, Wx = ele_load_data[1], ele_load_data[2]
nlf = len(pl)
if nlf == 2: # trusses
N_1 = pl[0]
elif nlf == 6: # plane frames
# N_1, V_1, M_1 = pl[0], pl[1], pl[2]
N_1, V_1, M_1 = pl[:3]
else:
print(f'\nWarning! Not supported. Number of nodal forces: {nlf}')
Lxy = np.array([ex[1]-ex[0], ey[1]-ey[0]])
L = np.sqrt(Lxy @ Lxy)
xl = np.linspace(0., L, nep)
one = np.ones(nep)
N = -1.*(N_1 * one + Wx * xl)
if nlf == 6:
V = V_1 * one + Wy * xl
M = -M_1 * one + V_1 * xl + 0.5 * Wy * xl**2
s = np.column_stack((N, V, M))
elif nlf == 2:
s = np.column_stack((N,))
# if eload_type == '-beamUniform':
# else:
return s, xl
def section_force_distribution_3d(ex, ey, ez, pl, nep=2,
ele_load_data=['-beamUniform', 0., 0., 0.]):
"""
Calculate section forces (N, Vy, Vz, T, My, Mz) for an elastic 3d beam.
Parameters
----------
ex : list
x element coordinates
ey : list
y element coordinates
ez : list
z element coordinates
pl : ndarray
vector of element nodal forces in the local system
nep : int
number of evaluation points, by default (2) at element ends
ele_load_list : list
list of transverse and longitudinal element load
syntax: [ele_load_type, Wy, Wz, Wx]
For now only '-beamUniform' element load type is acceptable.
Returns
-------
s : ndarray
[N Vy Vz T My Mz]; shape: (nep,6)
column vectors of section forces along local x-axis
xl : ndarray
coordinates of local x-axis; shape (nep,)
Notes
-----
Todo: add '-beamPoint' element load type
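Example (a sketch; ``pl`` would normally come from
``ops.eleResponse(ele_tag, 'localForces')`` and ex, ey, ez are the
element end coordinates in the global system)::
s, xl = section_force_distribution_3d(ex, ey, ez, pl, nep=17,
ele_load_data=['-beamUniform', -5., 0., 0.])
Mz = s[:, 5]  # bending moment about the local z-axis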
"""
# eload_type = ele_load_data[0]
Wy, Wz, Wx = ele_load_data[1], ele_load_data[2], ele_load_data[3]
N1, Vy1, Vz1, T1, My1, Mz1 = pl[:6]
Lxyz = np.array([ex[1]-ex[0], ey[1]-ey[0], ez[1]-ez[0]])
L = np.sqrt(Lxyz @ Lxyz)
xl = np.linspace(0., L, nep)
one = np.ones(nep)
N = -1.*(N1*one + Wx*xl)
Vy = Vy1*one + Wy*xl
Vz = Vz1*one + Wz*xl
T = -T1*one
Mz = -Mz1*one + Vy1*xl + 0.5*Wy*xl**2
My = My1*one + Vz1*xl + 0.5*Wz*xl**2
s = np.column_stack((N, Vy, Vz, T, My, Mz))
return s, xl
def section_force_diagram_2d(sf_type, Ew, sfac=1., nep=17,
fmt_secforce=fmt_secforce):
"""Display section forces diagram for 2d beam column model.
This function plots a section forces diagram for 2d beam column elements
with or without element loads. For now only '-beamUniform' constant
transverse or axial element loads are supported.
Args:
sf_type (str): type of section force: 'N' - normal force,
'V' - shear force, 'M' - bending moments.
Ew (dict): Ew Python dictionary contains information on non-zero
element loads, therefore each item of the Python dictionary
is in the form: 'ele_tag: ['-beamUniform', Wy, Wx]'.
sfac (float): scale factor by which the values of section forces are
multiplied.
nep (int): number of evaluation points including both end nodes
(default: 17)
fmt_secforce (str): format line string for section force distribution
curve. The format contains information on line color, style and
marks as in the standard matplotlib plot function.
(default: fmt_secforce = 'b-' # blue solid line)
Usage:
::
Wy, Wx = -10.e+3, 0.
Ew = {3: ['-beamUniform', Wy, Wx]}
sfacM = 5.e-5
plt.figure()
minVal, maxVal = opsv.section_force_diagram_2d('M', Ew, sfacM)
plt.title('Bending moments')
Todo:
Add support for other element loads available in OpenSees: partial
(trapezoidal) uniform element load, and 'beamPoint' element load.
"""
maxVal, minVal = -np.inf, np.inf
ele_tags = ops.getEleTags()
for ele_tag in ele_tags:
# by default no element load
eload_data = ['', 0., 0.]
if ele_tag in Ew:
eload_data = Ew[ele_tag]
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
Lxy = np.array([ex[1]-ex[0], ey[1]-ey[0]])
L = np.sqrt(Lxy @ Lxy)
cosa, cosb = Lxy / L
pl = ops.eleResponse(ele_tag, 'localForces')
s_all, xl = section_force_distribution_2d(ex, ey, pl, nep, eload_data)
if sf_type == 'N' or sf_type == 'axial':
s = s_all[:, 0]
elif sf_type == 'V' or sf_type == 'shear' or sf_type == 'T':
s = s_all[:, 1]
elif sf_type == 'M' or sf_type == 'moment':
s = s_all[:, 2]
minVal = min(minVal, np.min(s))
maxVal = max(maxVal, np.max(s))
s = s*sfac
s_0 = np.zeros((nep, 2))
s_0[0, :] = [ex[0], ey[0]]
s_0[1:, 0] = s_0[0, 0] + xl[1:] * cosa
s_0[1:, 1] = s_0[0, 1] + xl[1:] * cosb
s_p = np.copy(s_0)
# positive M are opposite to N and V
if sf_type == 'M' or sf_type == 'moment':
s *= -1.
s_p[:, 0] -= s * cosb
s_p[:, 1] += s * cosa
plt.axis('equal')
# section force curve
plt.plot(s_p[:, 0], s_p[:, 1], fmt_secforce,
solid_capstyle='round', solid_joinstyle='round',
dash_capstyle='butt', dash_joinstyle='round')
# model
plt.plot(ex, ey, 'k-', solid_capstyle='round', solid_joinstyle='round',
dash_capstyle='butt', dash_joinstyle='round')
# reference perpendicular lines
for i in np.arange(nep):
plt.plot([s_0[i, 0], s_p[i, 0]], [s_0[i, 1], s_p[i, 1]],
fmt_secforce, solid_capstyle='round',
solid_joinstyle='round', dash_capstyle='butt',
dash_joinstyle='round')
return minVal, maxVal
def section_force_diagram_3d(sf_type, Ew, sfac=1., nep=17,
fmt_secforce=fmt_secforce):
"""Display section forces diagram of a 3d beam column model.
This function plots section forces diagrams for 3d beam column elements
with or without element loads. For now only '-beamUniform' constant
transverse or axial element loads are supported.
Args:
sf_type (str): type of section force: 'N' - normal force,
'Vy' or 'Vz' - shear force, 'My' or 'Mz' - bending moments,
'T' - torsional moment.
Ew (dict): Ew Python dictionary contains information on non-zero
element loads, therefore each item of the Python dictionary
is in the form: 'ele_tag: ['-beamUniform', Wy, Wz, Wx]'.
sfac (float): scale factor by which the values of section forces are
multiplied.
nep (int): number of evaluation points including both end nodes
(default: 17)
fmt_secforce (str): format line string for section force distribution
curve. The format contains information on line color, style and
marks as in the standard matplotlib plot function.
(default: fmt_secforce = 'b-' # blue solid line)
Usage:
::
Wy, Wz, Wx = -5., 0., 0.
Ew = {3: ['-beamUniform', Wy, Wz, Wx]}
sfacMz = 1.e-1
plt.figure()
minY, maxY = opsv.section_force_diagram_3d('Mz', Ew, sfacMz)
plt.title(f'Bending moments Mz, max = {maxY:.2f}, min = {minY:.2f}')
Todo:
Add support for other element loads available in OpenSees: partial
(trapezoidal) uniform element load, and 'beamPoint' element load.
"""
maxVal, minVal = -np.inf, np.inf
ele_tags = ops.getEleTags()
azim, elev = az_el
fig_wi, fig_he = fig_wi_he
fleft, fbottom, fright, ftop = fig_lbrt
fig = plt.figure(figsize=(fig_wi/2.54, fig_he/2.54))
fig.subplots_adjust(left=fleft, bottom=fbottom, right=fright, top=ftop)
ax = fig.add_subplot(111, projection=Axes3D.name)
# ax.axis('equal')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.view_init(azim=azim, elev=elev)
for i, ele_tag in enumerate(ele_tags):
# by default no element load
eload_data = ['-beamUniform', 0., 0., 0.]
if ele_tag in Ew:
eload_data = Ew[ele_tag]
nd1, nd2 = ops.eleNodes(ele_tag)
# element x, y coordinates
ex = np.array([ops.nodeCoord(nd1)[0],
ops.nodeCoord(nd2)[0]])
ey = np.array([ops.nodeCoord(nd1)[1],
ops.nodeCoord(nd2)[1]])
ez = np.array([ops.nodeCoord(nd1)[2],
ops.nodeCoord(nd2)[2]])
# eo = Eo[i, :]
xloc = ops.eleResponse(ele_tag, 'xlocal')
yloc = ops.eleResponse(ele_tag, 'ylocal')
zloc = ops.eleResponse(ele_tag, 'zlocal')
g = np.vstack((xloc, yloc, zloc))
G, _ = rot_transf_3d(ex, ey, ez, g)
g = G[:3, :3]
pl = ops.eleResponse(ele_tag, 'localForces')
s_all, xl = section_force_distribution_3d(ex, ey, ez, pl, nep,
eload_data)
# 1:'y' 2:'z'
if sf_type == 'N':
s = s_all[:, 0]
dir_plt = 1
elif sf_type == 'Vy':
s = s_all[:, 1]
dir_plt = 1
elif sf_type == 'Vz':
s = s_all[:, 2]
dir_plt = 2
elif sf_type == 'T':
s = s_all[:, 3]
dir_plt = 1
elif sf_type == 'My':
s = s_all[:, 4]
dir_plt = 2
elif sf_type == 'Mz':
s = s_all[:, 5]
dir_plt = 1
minVal = min(minVal, np.min(s))
maxVal = max(maxVal, np.max(s))
s = s*sfac
# FIXME - can be simplified
s_0 = np.zeros((nep, 3))
s_0[0, :] = [ex[0], ey[0], ez[0]]
s_0[1:, 0] = s_0[0, 0] + xl[1:] * g[0, 0]
s_0[1:, 1] = s_0[0, 1] + xl[1:] * g[0, 1]
s_0[1:, 2] = s_0[0, 2] + xl[1:] * g[0, 2]
s_p = np.copy(s_0)
# positive M are opposite to N and V
# if sf_type == 'Mz' or sf_type == 'My':
if sf_type == 'Mz':
s *= -1.
s_p[:, 0] += s * g[dir_plt, 0]
s_p[:, 1] += s * g[dir_plt, 1]
s_p[:, 2] += s * g[dir_plt, 2]
# plt.axis('equal')
# section force curve
plt.plot(s_p[:, 0], s_p[:, 1], s_p[:, 2], fmt_secforce,
solid_capstyle='round', solid_joinstyle='round',
dash_capstyle='butt', dash_joinstyle='round')
# model
plt.plot(ex, ey, ez, 'k-', solid_capstyle='round',
solid_joinstyle='round', dash_capstyle='butt',
dash_joinstyle='round')
# reference perpendicular lines
for i in np.arange(nep):
plt.plot([s_0[i, 0], s_p[i, 0]],
[s_0[i, 1], s_p[i, 1]],
[s_0[i, 2], s_p[i, 2]], fmt_secforce,
solid_capstyle='round', solid_joinstyle='round',
dash_capstyle='butt', dash_joinstyle='round')
return minVal, maxVal
def quad_sig_out_per_node():
"""Return a 2d numpy array of stress components per OpenSees node.
Returns:
sig_out (ndarray): a 2d array of stress components per node with
the following components: sxx, syy, sxy, svm, s1, s2, angle.
Size (n_nodes x 7).
Examples:
sig_out = opsv.quad_sig_out_per_node()
Notes:
s1, s2: principal stresses
angle: angle of the principal stress s1
"""
ele_tags = ops.getEleTags()
node_tags = ops.getNodeTags()
n_nodes = len(node_tags)
# initialize helper arrays
sig_out = np.zeros((n_nodes, 7))
nodes_tag_count = np.zeros((n_nodes, 2), dtype=int)
nodes_tag_count[:, 0] = node_tags
for i, ele_tag in enumerate(ele_tags):
nd1, nd2, nd3, nd4 = ops.eleNodes(ele_tag)
ind1 = node_tags.index(nd1)
ind2 = node_tags.index(nd2)
ind3 = node_tags.index(nd3)
ind4 = node_tags.index(nd4)
nodes_tag_count[[ind1, ind2, ind3, ind4], 1] += 1
sig_ip_el = ops.eleResponse(ele_tag, 'stress')
sigM_ip = np.vstack(([sig_ip_el[0:3],
sig_ip_el[3:6],
sig_ip_el[6:9],
sig_ip_el[9:12]]))
sigM_nd = quad_extrapolate_ip_to_node(sigM_ip)
# sxx
sig_out[ind1, 0] += sigM_nd[0, 0]
sig_out[ind2, 0] += sigM_nd[1, 0]
sig_out[ind3, 0] += sigM_nd[2, 0]
sig_out[ind4, 0] += sigM_nd[3, 0]
# syy
sig_out[ind1, 1] += sigM_nd[0, 1]
sig_out[ind2, 1] += sigM_nd[1, 1]
sig_out[ind3, 1] += sigM_nd[2, 1]
sig_out[ind4, 1] += sigM_nd[3, 1]
# sxy
sig_out[ind1, 2] += sigM_nd[0, 2]
sig_out[ind2, 2] += sigM_nd[1, 2]
sig_out[ind3, 2] += sigM_nd[2, 2]
sig_out[ind4, 2] += sigM_nd[3, 2]
indxs, = np.where(nodes_tag_count[:, 1] > 1)
# n_indxs < n_nodes: e.g. 21<25 (bous), 2<6 (2el) etc.
n_indxs = np.shape(indxs)[0]
# divide summed stresses by the number of common nodes
sig_out[indxs, :] = \
sig_out[indxs, :]/nodes_tag_count[indxs, 1].reshape(n_indxs, 1)
# warning reshape from (pts,ncomp) to (ncomp,pts)
vm_out = vm_stress(np.transpose(sig_out[:, :3]))
sig_out[:, 3] = vm_out
princ_sig_out = princ_stress(np.transpose(sig_out[:, :3]))
sig_out[:, 4:7] = np.transpose(princ_sig_out)
return sig_out
def quad_extrapolate_ip_to_node(yip):
"""
Extrapolate values at the 4 Gauss integration points to the 4 nodes
of a quad element.
Useful for stress components (sxx, syy, sxy).
yip - either a single vector (4,), an array (4, 3) /sxx syy sxy/,
or an array (4, n)
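Example (sanity check: a uniform stress state at the integration
points extrapolates to the same values at the nodes)::
sig_ip = np.tile([10., 5., 2.], (4, 1))  # sxx, syy, sxy at the 4 ips
sig_nd = quad_extrapolate_ip_to_node(sig_ip)
# each row of sig_nd equals [10., 5., 2.]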
"""
xep = np.sqrt(3.)/2
X = np.array([[1.+xep, -1/2., 1.-xep, -1/2.],
[-1/2., 1.+xep, -1/2., 1.-xep],
[1.-xep, -1/2., 1.+xep, -1/2.],
[-1/2., 1.-xep, -1/2., 1.+xep]])
ynp = X @ yip
return ynp
def quad_9n_extrapolate_ip_to_node(yip):
"""
Extrapolate values at the 9 Gauss integration points to the 9 nodes
of a quad element.
Useful for stress components (sxx, syy, sxy).
yip - either a single vector (9,), an array (9, 3) /sxx syy sxy/,
or an array (9, n)
"""
a = 1./np.sqrt(0.6)
#!/usr/bin/python3
from src.AccuracyCalculator import *
import tensorflow as tf
import time
import numpy as np
import settings.DataSettings as dataSettings
# maxCount = 8
netPrediction_1 = np.array( [[ [0.9, 0.1], [0.7, 0.3], [0.6, 0.4], [0.4, 0.6], [0.3, 0.7],
[0.2, 0.8], [0.1, 0.9], [0.2, 0.8], [0.1, 0.9], [0.2, 0.8], [0.3, 0.7] ]] )
label_1 = np.array( [ [dataSettings.FIGHT_LABEL] * netPrediction_1.shape[0] ] )
# maxCount = 4
netPrediction_2 = np.array( [[ [0.9, 0.1], [0.7, 0.3], [0.3, 0.7], [0.4, 0.6], [0.3, 0.7],
[0.9, 0.1], [0.9, 0.1], [0.2, 0.8], [0.1, 0.9], [0.2, 0.8], [0.3, 0.7] ]] )
label_2 = np.array( [ [dataSettings.FIGHT_LABEL] * netPrediction_2.shape[0] ] )
# maxCount = 2
netPrediction_3 = np.array( [[ [0.9, 0.1], [0.3, 0.7], [0.9, 0.1], [0.4, 0.6], [0.8, 0.2],
[0.1, 0.9], [0.9, 0.1], [0.2, 0.8], [0.1, 0.9] ]] )
label_3 = np.array( [ [dataSettings.NO_FIGHT_LABEL] * netPrediction_3.shape[0] ] )
# maxCount = 6
netPrediction_4 = np.array( [[ [0.9, 0.1], [0.3, 0.7], [0.9, 0.1], [0.4, 0.6], [0.3, 0.7],
[0.1, 0.9], [0.1, 0.9], [0.2, 0.8], [0.1, 0.9], [0.7, 0.3] ]] )
label_4 = np.array( [ [dataSettings.NO_FIGHT_LABEL] * netPrediction_4.shape[0] ] )
def Check_CalculateAccuracy():
print("Check_CalculateAccuracy()")
accuracyCalculator = VideosAccuracyCalculator()
# 5 TP
numberOfTP = 5
truePositivePredictions = np.tile(netPrediction_1, [numberOfTP, 1, 1])
truePositiveLabels = np.tile(label_1, [numberOfTP, 1, 1])
accuracyCalculator.AppendNetPredictions(truePositivePredictions, truePositiveLabels)
# 2 FN
numberOfFN = 2
falseNegativePredictions = np.tile(netPrediction_2, [numberOfFN, 1, 1])
falseNegativeLabels = np.tile(label_2, [numberOfFN, 1, 1])
accuracyCalculator.AppendNetPredictions(falseNegativePredictions, falseNegativeLabels)
# 3 TN
numberOfTN = 3
trueNegativePredictions = np.tile(netPrediction_3, [numberOfTN, 1, 1])
trueNegativeLabels = np.tile(label_3, [numberOfTN, 1, 1])
# -*- coding: utf-8 -*-
"""
Code written entirely by <NAME>.
Find K clusters given a set of points.
Points tend to be normally distributed.
"""
# Import dependecies
import numpy as np
import matplotlib.pyplot as plt
def extract(filename):
""" Extract data from the txt file."""
return np.loadtxt(filename)
def distance(a,b):
""" Return the Euclidean distance between two points."""
return np.linalg.norm(a-b)
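def closest_centroid(point, centroids):
    """ Illustrative helper (not part of the original script): return the
    index of the centroid nearest to point, using distance() above."""
    dists = [distance(point, c) for c in centroids]
    return int(np.argmin(dists))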
# -*- coding: utf-8 -*-
#
# Project: silx (originally pyFAI)
# https://github.com/silx-kit/silx
#
# Copyright (C) 2012-2017 European Synchrotron Radiation Facility, Grenoble, France
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "02/08/2016"
import unittest
import numpy
import logging
logger = logging.getLogger(__name__)
from ..bilinear import BilinearImage
class TestBilinear(unittest.TestCase):
"""basic maximum search test"""
N = 1000
def test_max_search_round(self):
"""test maximum search using random points: maximum is at the pixel center"""
a = numpy.arange(100) - 40.
b = numpy.arange(100) - 60.
ga = numpy.exp(-a * a / 4000)
gb = numpy.exp(-b * b / 6000)
gg = numpy.outer(ga, gb)
b = BilinearImage(gg)
ok = 0
for s in range(self.N):
i, j = numpy.random.randint(100), numpy.random.randint(100)
k, l = b.local_maxi((i, j))
if abs(k - 40) > 1e-4 or abs(l - 60) > 1e-4:
logger.warning("Wrong guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
else:
logger.debug("Good guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
ok += 1
logger.debug("Success rate: %.1f", 100. * ok / self.N)
self.assertEqual(ok, self.N, "Maximum is always found")
def test_max_search_half(self):
"""test maximum search using random points: maximum is at a pixel edge"""
a = numpy.arange(100) - 40.5
b = numpy.arange(100) - 60.5
ga = numpy.exp(-a * a / 4000)
gb = numpy.exp(-b * b / 6000)
gg = numpy.outer(ga, gb)
b = BilinearImage(gg)
ok = 0
for s in range(self.N):
i, j = numpy.random.randint(100), numpy.random.randint(100)
k, l = b.local_maxi((i, j))
if abs(k - 40.5) > 0.5 or abs(l - 60.5) > 0.5:
logger.warning("Wrong guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
else:
logger.debug("Good guess maximum (%i,%i) -> (%.1f,%.1f)", i, j, k, l)
ok += 1
logger.debug("Success rate: %.1f", 100. * ok / self.N)
self.assertEqual(ok, self.N, "Maximum is always found")
def test_map(self):
N = 100
y, x = numpy.ogrid[:N, :N + 10]
img = x + y
b = BilinearImage(img)
x2d = numpy.zeros_like(y) + x
y2d = numpy.zeros_like(x) + y
res1 = b.map_coordinates((y2d, x2d))
self.assertEqual(abs(res1 - img).max(), 0, "images are the same (corners)")
x2d = numpy.zeros_like(y) + (x[:, :-1] + 0.5)
y2d = numpy.zeros_like(x[:, :-1]) + y
res1 = b.map_coordinates((y2d, x2d))
self.assertEqual(abs(res1 - img[:, :-1] - 0.5).max(), 0, "images are the same (middle)")
x2d = numpy.zeros_like(y[:-1, :]) + (x[:, :-1] + 0.5)
y2d = numpy.zeros_like(x[:, :-1]) + (y[:-1, :] + 0.5)
res1 = b.map_coordinates((y2d, x2d))
self.assertEqual(abs(res1 - img[:-1, 1:]).max(), 0, "images are the same (center)")
def test_profile_grad(self):
N = 100
img = numpy.arange(N * N).reshape(N, N)
b = BilinearImage(img)
res1 = b.profile_line((0, 0), (N - 1, N - 1))
l = numpy.ceil(numpy.sqrt(2) * N)
self.assertEqual(len(res1), l, "Profile has correct length")
self.assertLess((res1[:-2] - res1[1:-1]).std(), 1e-3, "profile is linear (excluding last point)")
def test_profile_gaus(self):
N = 100
x = numpy.arange(N) - N // 2.0
g = | numpy.exp(-x * x / (N * N)) | numpy.exp |
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
import numpy as np
import tensorflow.compat.v1 as tf
os.sys.path.append("../") # dynamic_sparsity
class BertSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, opts, dtype):
super(BertSchedule, self).__init__()
self.dtype = dtype
self.peak_lr = tf.constant(opts.peak_learning_rate, tf.float32)
self.warmup_steps_i = tf.cast(opts.warmup_steps, tf.int32)
self.warmup_steps_f = tf.cast(self.warmup_steps_i, tf.float32)
self.decay_steps_i = tf.cast(opts.cooldown_steps, tf.int32)
self.decay_steps_f = tf.cast(self.decay_steps_i, tf.float32)
self.min_lr = opts.min_learning_rate
self.decay_power = opts.decay_power
def __call__(self, step):
# Casting
step_i = tf.cast(step, tf.int32)
step_f = tf.cast(step_i, tf.float32)
# Branching
poly_learning_rate = tf.train.polynomial_decay(self.peak_lr, step_i - self.warmup_steps_i,
self.decay_steps_f, self.min_lr,
power=self.decay_power)
def true_fn():
return (step_f / self.warmup_steps_f) * self.peak_lr
def false_fn():
return poly_learning_rate
return tf.cast(tf.cond(step_i < self.warmup_steps_i, true_fn, false_fn), self.dtype)
def build_optimizer(opt_name, opt_args=None):
# Fetch the requested optimiser
opt_cls = {
'GradientDescent': tf.train.GradientDescentOptimizer,
'Momentum': tf.train.MomentumOptimizer,
'Adam': tf.train.AdamOptimizer
}.get(opt_name)
if opt_cls is None:
raise ValueError(f'Unsupported optimizer {opt_name}')
# Fetch default kwargs, accepting overrides from argparse
opt_kws = {
'GradientDescent': {},
'Momentum': {
'momentum': 0.0001,
'use_nesterov': True
},
'Adam': {
'beta1': 0.9,
'beta2': 0.999,
'epsilon': 1e-02
}
}.get(opt_name)
if opt_args is not None:
opt_kws.update(opt_args)
return opt_cls, opt_kws
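# Hedged usage sketch (added for illustration; not part of the original script): fetch
# an optimiser class together with its default keyword arguments, override one of them,
# and instantiate it. The learning rate value here is an arbitrary example.
def _example_build_optimizer():
    opt_cls, opt_kws = build_optimizer('Momentum', opt_args={'momentum': 0.9})
    # opt_kws is now {'momentum': 0.9, 'use_nesterov': True}
    return opt_cls(learning_rate=0.01, **opt_kws)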
def make_histogram_proto(data, bins_count=None):
# number of elements in the array
elem_count = np.prod(data.shape)
# Make sure the number of bins is defined and
# doesn't exceed the number of elements
if bins_count is None:
bins_count = elem_count
else:
bins_count = np.min([bins_count, elem_count]).astype(np.int)
# compute histogram using numpy
occurrences, bin_edges = np.histogram(data, bins=bins_count)
return tf.HistogramProto(min=data.min().astype(np.float),
max=data.max().astype(np.float),
num=elem_count.astype(np.int),
sum=np.sum(data).astype(np.float),
sum_squares= | np.sum([datum * datum for datum in data]) | numpy.sum |
"""
Functions to count and cluster amino acid sequences.
"""
import numpy as np
from . import utils
class PSFM:
"""Meta class for a position specific scoring matrix"""
def __init__(self, pssm, alphabet=utils.AMINO_ACIDS, comments=(), consensus=None):
self._psfm = pssm
self.alphabet = alphabet
self.comments = comments
self.consensus = consensus
@classmethod
def from_txt(cls, lines):
"""Load a scoring/frequency matrix from text"""
alphabet = None
dtype = int
matlines = []
comments = []
consensus = []
for line in lines:
line = line.strip()
if line.startswith('#'):
comments.append(line.lstrip('#'))
line = line.split('#')[0]
if not line:
continue
if alphabet is None:
alphabet = tuple(line.strip().split())
else:
if dtype is float or '.' in line:
dtype = float
line = line.split()
consensus.append(line[1])
matlines.append([dtype(n) for n in line[2:]])
mat = np.array(matlines)
return cls(mat, alphabet=''.join(alphabet), comments=comments, consensus=''.join(consensus))
def to_txt(self, mat=None):
"""Output a matrix in text format."""
lines = []
for line in self.comments:
lines.append(f'#{line}')
if mat is None:
mat = self.pssm
consensus = self.consensus
if self.consensus is None:
consensus = ['X'] * mat.shape[0]
l = len(str(mat.shape[0]))
lines.append(' ' * (l + 3) + ' '.join([f' {a:<5}' for a in self.alphabet]))
for i in range(mat.shape[0]):
line = f'{i+1:>{l}} {consensus[i]} ' + ' '.join([f'{n:>6.3f}' if n else ' 0 ' for n in mat[i]])
lines.append(line)
return lines
def psfm(self):
return self._psfm
def pssm(self, bg='blosum62', return_psfm=False):
if bg is None:
bg = np.ones(len(self.alphabet)) / len(self.alphabet)
elif bg in ('blosum62', 'blosum', 'bl62', 'bl'):
bg = utils.bgfreq_array(self.alphabet)
psfm = self.psfm()
pssm = np.zeros_like(psfm, dtype='float')
mask = psfm > 0
pssm[mask] = np.log2((psfm / bg)[mask])
if return_psfm:
return pssm, psfm
return pssm
def shannon_logo(self):
pssm, psfm = self.pssm(bg=None, return_psfm=True)
return np.sum(pssm * psfm, axis=1, keepdims=True) * psfm
def kullback_leibler_logo(self):
pssm, psfm = self.pssm(bg='blosum62', return_psfm=True)
return np.sum(pssm * psfm, axis=1, keepdims=True) * psfm * np.sign(pssm)
def weighted_kullback_leibler_logo(self):
pssm, psfm = self.pssm(bg='blosum62', return_psfm=True)
return np.sum(pssm * psfm, axis=1, keepdims=True) * pssm / np.sum(np.abs(pssm), axis=1, keepdims=True)
def p_weighted_kullback_leibler_logo(self):
pssm, psfm = self.pssm(bg='blosum62', return_psfm=True)
return | np.sum(pssm * psfm, axis=1, keepdims=True) | numpy.sum |
# coding=utf-8
# Copyright 2022 HyperBO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load or generate data for learning priors in BayesOpt."""
import functools
import itertools
import pickle
from absl import logging
from hyperbo.basics import data_utils
from hyperbo.basics import definitions as defs
from hyperbo.gp_utils import gp
import jax
import jax.numpy as jnp
import numpy as np
import pandas as pd
from tensorflow.io import gfile
partial = functools.partial
SubDataset = defs.SubDataset
PD1 = {
('phase0', 'matched'): '../pd1/pd1_matched_phase0_results.jsonl',
('phase1', 'matched'): '../pd1/pd1_matched_phase1_results.jsonl',
('phase0', 'unmatched'): '../pd1/pd1_unmatched_phase0_results.jsonl',
('phase1', 'unmatched'): '../pd1/pd1_unmatched_phase1_results.jsonl',
}
HPOB_ROOT_DIR = 'hpob-data/'
def get_aligned_dataset(trials, study_identifier, labels, verbose=True):
"""Get aligned dataset from processed trials from get_dataset.
Args:
trials: pandas.DataFrame that stores all the trials.
study_identifier: a label that uniquely identifies each study group.
labels: labels of parameters and an eval metric (the last one).
verbose: print info about data if True.
Returns:
aligned_dataset: Dict[str, SubDataset], mapping from aligned dataset names
to an aligned SubDataset, with n x d input x, n x m evals y and non-empty
aligned field with concatenated m study group names and aligned_suffix.
"""
aligned_dataset = {}
trials = trials[trials['aligned']]
for aligned_suffix in trials['aligned_suffix'].unique():
aligned_trials = trials[trials['aligned_suffix'] == aligned_suffix]
aligned_groups = aligned_trials[study_identifier].unique()
pivot_df = aligned_trials.pivot(
index=labels[:-1], columns=study_identifier, values=labels[-1])
nan_groups = [
c for c in pivot_df.columns if pivot_df[c].isna().values.any()
]
combnum = min(3, len(nan_groups) + 1, len(aligned_groups) - 1)
for groups in itertools.chain(
*[itertools.combinations(nan_groups, r) for r in range(combnum)]):
remain_groups = [sg for sg in aligned_groups if sg not in groups]
if groups:
index = np.all([pivot_df[sg].isnull() for sg in groups], axis=0)
sub_df = pivot_df.loc[index, remain_groups].dropna().reset_index()
else:
sub_df = pivot_df.dropna().reset_index()
if sub_df.shape[0] > 0:
if verbose:
print('removed groups: ', groups)
print('remaining groups: ', remain_groups)
print('sub_df: ', sub_df.shape)
aligned_key = ';'.join(list(groups) + [aligned_suffix])
xx = jnp.array(sub_df[labels[:-1]])
yy = jnp.array(sub_df[remain_groups])
aligned_dataset[aligned_key] = SubDataset(
x=xx, y=yy, aligned=';'.join(remain_groups + [aligned_suffix]))
msg = f'aligned dataset: {jax.tree_map(jnp.shape, aligned_dataset)}'
logging.info(msg=msg)
if verbose:
print(msg)
return aligned_dataset
def get_dataset(trials, study_identifier, labels, verbose=True):
"""Get dataset from a dataframe.
Args:
trials: pandas.DataFrame that stores all the trials.
study_identifier: a label that uniquely identifies each study group.
labels: labels of parameters and an eval metric (the last one).
verbose: print info about data if True.
Returns:
dataset: Dict[str, SubDataset], mapping from study group to a SubDataset.
"""
study_groups = trials[study_identifier].unique()
dataset = {}
for sg in study_groups:
study_trials = trials.loc[trials[study_identifier] == sg, labels]
xx = jnp.array(study_trials[labels[:-1]])
yy = jnp.array(study_trials[labels[-1:]])
dataset[sg] = SubDataset(x=xx, y=yy)
msg = f'dataset before align: {jax.tree_map(jnp.shape, dataset)}'
logging.info(msg)
if verbose:
print(msg)
return dataset
def sample_sub_dataset(key,
trials,
study_identifier,
labels,
p_observed=0.,
verbose=True,
sub_dataset_key=None):
"""Sample a sub-dataset from trials dataframe.
Args:
key: random state for jax.random.
trials: pandas.DataFrame that stores all the trials.
study_identifier: a label that uniquely identifies each study group.
labels: labels of parameters and an eval metric (the last one).
p_observed: percentage of data that is observed.
verbose: print info about data if True.
sub_dataset_key: sub_dataset name to be queried.
Returns:
trials: remaining trials after removing the sampled sub-dataset.
sub_dataset_key: study group key for testing in dataset.
queried_sub_dataset: SubDataset to be queried.
"""
test_study_key, observed_key = jax.random.split(key, 2)
if sub_dataset_key is None:
study_groups = trials[study_identifier].unique()
sub_dataset_id = jax.random.choice(test_study_key, len(study_groups))
sub_dataset_key = study_groups[sub_dataset_id]
else:
study_groups = trials[study_identifier].unique()
if sub_dataset_key not in study_groups:
raise ValueError(f'{sub_dataset_key} must be in dataframe.')
queried_trials = trials[trials[study_identifier] == sub_dataset_key].sample(
frac=1. - p_observed, replace=False, random_state=observed_key[0])
trials = trials.drop(queried_trials.index)
xx = jnp.array(queried_trials[labels[:-1]])
yy = jnp.array(queried_trials[labels[-1:]])
queried_sub_dataset = SubDataset(x=xx, y=yy)
msg = (f'removed study={sub_dataset_key} '
f'removed study shape: x-{queried_sub_dataset.x.shape}, '
f'y-{queried_sub_dataset.y.shape}')
logging.info(msg)
if verbose:
print(msg)
return trials, sub_dataset_key, queried_sub_dataset
def process_dataframe(
key,
trials,
study_identifier,
labels,
p_observed=0.,
maximize_metric=True,
warp_func=None,
verbose=True,
sub_dataset_key=None,
num_remove=0,
p_remove=0.,
):
"""Process a dataframe and return needed info for an experiment.
Args:
key: random state for jax.random or sub_dataset_key to be queried.
trials: pandas.DataFrame that stores all the trials.
study_identifier: a label that uniquely identifies each study group.
labels: labels of parameters and an eval metric (the last one).
p_observed: percentage of data that is observed.
maximize_metric: a boolean indicating if higher values of the eval metric
are better or not. If maximize_metric is False and there is no warping for
the output label, we negate all outputs.
warp_func: mapping from label names to warping functions.
verbose: print info about data if True.
sub_dataset_key: sub_dataset name to be queried.
num_remove: number of sub-datasets to remove.
p_remove: proportion of data to be removed.
Returns:
dataset: Dict[str, SubDataset], mapping from study group to a SubDataset.
sub_dataset_key: study group key for testing in dataset.
queried_sub_dataset: SubDataset to be queried.
"""
trials = trials[[study_identifier] + labels +
['aligned', 'aligned_suffix']].copy(deep=True)
trials = trials.dropna()
if verbose:
print('trials: ', trials.shape)
if not warp_func:
warp_func = {}
logging.info(msg=f'warp_func = {warp_func}')
if labels[-1] not in warp_func and not maximize_metric:
warp_func[labels[-1]] = lambda x: -x
for la, fun in warp_func.items():
if la in labels:
trials.loc[:, la] = fun(trials.loc[:, la])
key, subkey = jax.random.split(key)
trials, sub_dataset_key, queried_sub_dataset = sample_sub_dataset(
key=subkey,
trials=trials,
study_identifier=study_identifier,
labels=labels,
p_observed=p_observed,
verbose=verbose,
sub_dataset_key=sub_dataset_key)
for _ in range(num_remove):
key, subkey = jax.random.split(key)
removed_sub_dataset_key = None
sub_dataset_key_split = sub_dataset_key.split(',')
if len(sub_dataset_key_split) > 1:
task_dataset_name = sub_dataset_key_split[1]
study_groups = trials[study_identifier].unique()
for s in study_groups:
if task_dataset_name in s:
removed_sub_dataset_key = s
trials, _, _ = sample_sub_dataset(
key=subkey,
trials=trials,
study_identifier=study_identifier,
labels=labels,
p_observed=p_observed,
verbose=verbose,
sub_dataset_key=removed_sub_dataset_key)
if p_remove > 0:
key, subkey = jax.random.split(key)
removed_trials = trials.sample(
frac=p_remove, replace=False, random_state=subkey[0])
trials = trials.drop(removed_trials.index)
dataset = get_dataset(
trials=trials,
study_identifier=study_identifier,
labels=labels,
verbose=verbose)
aligned_dataset = get_aligned_dataset(
trials=trials,
study_identifier=study_identifier,
labels=labels,
verbose=verbose)
dataset.update(aligned_dataset)
return dataset, sub_dataset_key, queried_sub_dataset
def pd1(key,
p_observed,
verbose=True,
sub_dataset_key=None,
input_warp=True,
output_log_warp=True,
num_remove=0,
metric_name='best_valid/error_rate',
p_remove=0.):
"""Load PD1(Nesterov) from init2winit and pick a random study as test function.
For matched dataframes, we set `aligned` to True in its trials and reflect it
in its corresponding SubDataset.
The `aligned` value and sub-dataset key has suffix aligned_suffix, which is
its phase identifier.
Args:
key: random state for jax.random.
p_observed: percentage of data that is observed.
verbose: print info about data if True.
sub_dataset_key: sub_dataset name to be queried.
input_warp: apply warping to data if True.
output_log_warp: use log warping for output.
num_remove: number of sub-datasets to remove.
metric_name: name of metric.
p_remove: proportion of data to be removed.
Returns:
dataset: Dict[str, SubDataset], mapping from study group to a SubDataset.
sub_dataset_key: study group key for testing in dataset.
queried_sub_dataset: SubDataset to be queried.
"""
all_trials = []
for k, v in PD1.items():
with gfile.GFile(v, 'r') as f:
trials = pd.read_json(f, orient='records', lines=True)
trials.loc[:, 'aligned'] = (k[1] == 'matched')
trials.loc[:, 'aligned_suffix'] = k[0]
all_trials.append(trials)
trials = pd.concat(all_trials)
labels = [
'hps.lr_hparams.decay_steps_factor', 'hps.lr_hparams.initial_value',
'hps.lr_hparams.power', 'hps.opt_hparams.momentum', metric_name
]
warp_func = {}
if input_warp:
warp_func = {
'hps.opt_hparams.momentum': lambda x: np.log(1 - x),
'hps.lr_hparams.initial_value': np.log,
}
if output_log_warp:
warp_func['best_valid/error_rate'] = lambda x: -np.log(x + 1e-10)
return process_dataframe(
key=key,
trials=trials,
study_identifier='study_group',
labels=labels,
p_observed=p_observed,
maximize_metric=False,
warp_func=warp_func if input_warp else None,
verbose=verbose,
sub_dataset_key=sub_dataset_key,
num_remove=num_remove,
p_remove=p_remove)
def _deduplicate(x, y, dataset_name, verbose=True):
"""Deduplicates x values, keeping the ones with highest y."""
# Sort by decreasing Y values: deduplication keeps points with best rewards.
sorted_xy = list(zip(*sorted(zip(x, y), key=lambda xy: xy[1], reverse=True)))
x = np.array(sorted_xy[0])
y = np.array(sorted_xy[1])
_, idx = np.unique(x, axis=0, return_index=True)
if verbose:
print(
f'Removed {x.shape[0] - len(idx)} duplicated points from {dataset_name}'
)
return x[idx, :], y[idx, :]
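# Small illustrative check (added for this write-up, not in the original module):
# duplicated x rows are collapsed and the copy with the highest y survives.
def _example_deduplicate():
    x = np.array([[0.1, 0.2], [0.1, 0.2], [0.3, 0.4]])
    y = np.array([[1.0], [2.0], [3.0]])
    # returns the two unique rows; the duplicated row keeps y = 2.0, not 1.0
    return _deduplicate(x, y, dataset_name='demo', verbose=False)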
def _normalize_maf_dataset(maf_dataset, num_hparams, neg_error_to_accuracy):
"""Project the hparam values to [0, 1], and optionally convert y values.
Args:
maf_dataset: a dictionary of the format `{subdataset_name: {"X": ...,
"Y":...}}`.
num_hparams: the number of different hyperparameters being optimized.
neg_error_to_accuracy: whether to transform y values that correspond to
negative error rates to accuracy values.
Returns:
`maf_dataset` with normalized "X", and maybe "Y", values.
"""
min_vals = np.ones(num_hparams) * np.inf
max_vals = -np.ones(num_hparams) * np.inf
for k, subdataset in maf_dataset.items():
min_vals = np.minimum(min_vals, np.min(subdataset['X'], axis=0))
max_vals = np.maximum(max_vals, np.max(subdataset['X'], axis=0))
for k in maf_dataset:
maf_dataset[k]['X'] = (maf_dataset[k]['X'] - min_vals) / (
max_vals - min_vals)
if neg_error_to_accuracy:
maf_dataset[k]['Y'] = 1 + maf_dataset[k]['Y']
return maf_dataset
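# Toy sketch (illustration only; the sub-dataset names are made up): hyperparameter
# columns are min-max scaled to [0, 1] across all sub-datasets, and y values are left
# untouched when neg_error_to_accuracy is False.
def _example_normalize_maf_dataset():
    maf = {
        'task_a': {'X': np.array([[0., 10.], [1., 20.]]), 'Y': np.array([[-0.1], [-0.2]])},
        'task_b': {'X': np.array([[2., 30.]]), 'Y': np.array([[-0.3]])},
    }
    return _normalize_maf_dataset(maf, num_hparams=2, neg_error_to_accuracy=False)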
def process_pd1_for_maf(outfile_path,
min_num_points,
input_warp,
output_log_warp,
neg_error_to_accuracy,
enforce_same_size_subdatasets,
verbose=True):
"""Store the pd1 dataset on cns in a MAF baseline-friendly format.
Args:
outfile_path: cns path where the pickled output is stored.
min_num_points: Minimum number of points that must be in a subdataset to
keep it.
input_warp: apply warping to data if True.
output_log_warp: use log warping for output.
neg_error_to_accuracy: whether to transform y values that correspond to
negative error rates to accuracy values. Cannot be True if output_log_warp
is True.
enforce_same_size_subdatasets: whether to only keep `n` points of each
subdataset, where `n` is the size of the smallest remaining dataset.
verbose: print deduplication info about if True.
"""
if output_log_warp and neg_error_to_accuracy:
raise ValueError('Cannot transform y-values when the pd1 outputs are '
'log-warped!')
key = jax.random.PRNGKey(0)
dataset, _, _ = pd1(
key, p_observed=1, input_warp=input_warp, output_log_warp=output_log_warp)
num_hparams = dataset[list(dataset.keys())[0]].x.shape[1]
excluded_subdatasets = ['imagenet_resnet50,imagenet,resnet,resnet50,1024']
# Load and deduplicate data.
maf_dataset = {}
for k, subdataset in dataset.items():
if subdataset.aligned is None and k not in excluded_subdatasets:
x, y = _deduplicate(
np.array(subdataset.x),
np.array(subdataset.y),
dataset_name=k,
verbose=verbose)
if x.shape[0] > min_num_points:
maf_dataset[k] = dict(X=x, Y=y)
if enforce_same_size_subdatasets:
min_subdataset_size = min(
[maf_dataset[k]['X'].shape[0] for k in maf_dataset])
for k, subdataset in maf_dataset.items():
x, y = maf_dataset[k]['X'], maf_dataset[k]['Y']
maf_dataset[k] = dict(
X=x[:min_subdataset_size, :], Y=y[:min_subdataset_size, :])
maf_dataset = _normalize_maf_dataset(
maf_dataset,
num_hparams=num_hparams,
neg_error_to_accuracy=neg_error_to_accuracy)
data_utils.log_dataset(maf_dataset)
with gfile.Open(outfile_path, 'wb') as f:
pickle.dump(maf_dataset, f, pickle.HIGHEST_PROTOCOL)
def get_output_warper(output_log_warp=True, return_warping=False):
"""Returns an output warper with the option to use -log on 1-y."""
if output_log_warp:
def output_warping(f):
def warpped_f(x_array):
y = f(x_array)
if not | np.all(y <= 1. + 1e-11) | numpy.all |
import sounddevice as sd
import time
import numpy as np
import pygame
"""
Function and class definitions
"""
# Queue-like list that can return its complete contents as a numpy array
class schlange:
def __init__(self,max_groesse):
self.__max_groesse = max_groesse
self.__voll = False
self.__liste = []
# anhaengen: append an object to the queue; if the list becomes too large, drop the oldest entry
def anhaengen(self,ding):
self.__liste.append(ding)
if len(self.__liste) > self.__max_groesse:
self.__voll=True
self.__liste.pop(0)
# get_liste: return the complete list as a numpy array
def get_liste(self):
return np.array(self.__liste)
# nullen: set all values in the queue to 0
def nullen(self):
for i in range(len(self.__liste)):self.__liste[i]=0
def ist_voll(self):
return self.__voll
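# Minimal usage sketch (added for illustration; not part of the original script): a
# fixed-size queue that drops its oldest entry once full and can be dumped as a numpy array.
def _beispiel_schlange():
    s = schlange(3)
    for wert in [1, 2, 3, 4]:
        s.anhaengen(wert)   # appending the 4th value drops the oldest entry (1)
    return s.get_liste()    # -> array([2, 3, 4])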
# performs an FFT/DFT on an already windowed data slice (depending on the slice size) and bins the result
def fft_auf_slice_binned(daten,slicegroesse,bingroesse):
slice_FFT = abs(np.fft.fft(daten,slicegroesse)[0:int(slicegroesse/2)])
binned_slice = []
iterator = 0
bin_wert = 0
for i in slice_FFT:
if iterator > 4:
binned_slice.append(bin_wert/5)
iterator = 0
bin_wert = 0
bin_wert += i
iterator += 1
binned_slice.append(bin_wert/5)
binned_slice = np.array(binned_slice)
return binned_slice
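# Usage sketch (illustration only; the signal parameters are arbitrary): bin the magnitude
# spectrum of a Hann-windowed 1024-sample sine. Note that the function above bins in fixed
# groups of 5, so the bingroesse argument is currently ignored.
def _beispiel_fft_binned():
    slicegroesse = 1024
    fenster = np.hanning(slicegroesse)
    signal = np.sin(2 * np.pi * 50 * np.arange(slicegroesse) / slicegroesse)
    return fft_auf_slice_binned(fenster * signal, slicegroesse, 5)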
def fft_auf_slice(daten,slicegroesse):
slice_FFT = abs( | np.fft.fft(daten,slicegroesse) | numpy.fft.fft |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 28 14:54:18 2017
@author: 19514733
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from cvxopt import matrix as cvxmat, solvers
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils import column_or_1d
import monoboost
#__all__ = [
# "Scale",
# "MonoComparator",
# "MonoLearner",
# "MonoBoost",
# "MonoBoostEnsemble",
# "apply_rules_c"]
TOL = 0 # 1e-55
class Scale():
"""Performs scaling of linear variables according to Friedman et al. 2005
Sec 5
Each variable is first Winsorized l->l*, then standardised as 0.4 x l* /
std(l*).
Warning: this class should not be used directly.
"""
def __init__(self, trim_quantile=0.0):
self.trim_quantile = trim_quantile
self.scale_multipliers = None
self.winsor_lims = None
def train(self, X):
# get winsor limits
self.winsor_lims = np.ones([2, X.shape[1]]) * np.inf
self.winsor_lims[0, :] = -np.inf
if self.trim_quantile > 0:
for i_col in np.arange(X.shape[1]):
lower = np.percentile(X[:, i_col], self.trim_quantile * 100)
upper = np.percentile(
X[:, i_col], 100 - self.trim_quantile * 100)
self.winsor_lims[:, i_col] = [lower, upper]
# get multipliers
scale_multipliers = np.ones(X.shape[1])
for i_col in np.arange(X.shape[1]):
num_uniq_vals = len(np.unique(X[:, i_col]))
# don't scale binary variables which are effectively already rules:
if num_uniq_vals > 2:
X_col_winsorised = X[:, i_col].copy()
X_col_winsorised[X_col_winsorised <
self.winsor_lims[0, i_col]
] = self.winsor_lims[0, i_col]
X_col_winsorised[X_col_winsorised >
self.winsor_lims[1, i_col]
] = self.winsor_lims[1, i_col]
scale_multipliers[i_col] = 1.0 / np.std(X_col_winsorised)
self.scale_multipliers = scale_multipliers
def scale(self, X):
return X * self.scale_multipliers
def unscale(self, X):
return X / self.scale_multipliers
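# Hedged usage sketch (illustration only, not part of the original module): scale a toy
# design matrix so that each non-binary column is divided by its (winsorised) standard
# deviation, while the binary column is left unscaled.
def _example_scale():
    X = np.column_stack([np.arange(10, dtype=float), np.array([0., 1.] * 5)])
    sc = Scale(trim_quantile=0.0)
    sc.train(X)
    return sc.scale(X)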
class MonoComparator():
def __init__(self, n_feats, incr_feats, decr_feats, nmt_hyperplane=None):
self.incr_feats = np.asarray(incr_feats)
self.decr_feats = np.asarray(decr_feats)
self.nmt_hyperplane = nmt_hyperplane
self.mt_feats = np.asarray(list(incr_feats) + list(decr_feats))
self.nmt_feats = np.asarray(
[j for j in np.arange(n_feats) + 1 if j not in self.mt_feats])
self.n_feats = n_feats
def compare(self, x1_in, x2_in, check_nmt_feats=True, strict=False):
# returns: -1 if decreasing, 0 if identical, +1 if increasing, -99 if
# incomparable
if len(self.mt_feats) == 0:
return -99
elif len(x1_in.shape) > 1:
x1 = np.ravel(x1_in)
x2 = np.ravel(x2_in)
else:
x1 = x1_in.copy()
x2 = x2_in.copy()
# check for identical
if np.array_equal(x1, x2):
return 0
# reverse polarity of decreasing features
for dec_feat in self.decr_feats:
x1[dec_feat - 1] = -1 * x1[dec_feat - 1]
x2[dec_feat - 1] = -1 * x2[dec_feat - 1]
# check mt feats all increasing (or decreasing)
mt_feats_difference = np.zeros(self.n_feats)
if len(self.mt_feats) > 0:
feats_indx = self.mt_feats - 1
mt_feats_difference[feats_indx] = x2[feats_indx] - x1[feats_indx]
mt_feats_same = np.sum(mt_feats_difference[self.mt_feats - 1] == 0)
if strict:
mt_feats_incr = np.sum(mt_feats_difference[self.mt_feats - 1] > 0)
mt_feats_decr = np.sum(mt_feats_difference[self.mt_feats - 1] < 0)
else:
mt_feats_incr = np.sum(mt_feats_difference[self.mt_feats - 1] >= 0)
mt_feats_decr = np.sum(mt_feats_difference[self.mt_feats - 1] <= 0)
if mt_feats_same == len(self.mt_feats):
comp = 0
elif mt_feats_incr == len(self.mt_feats): # increasing
comp = +1
elif mt_feats_decr == len(self.mt_feats): # decreasing
comp = -1
else: # incomparable
comp = -99
# short exit if available
if comp == -99 or comp == 0:
return -99
# if still going, check mt feats by weakened planes
if len(
self.nmt_feats) == 0 or not check_nmt_feats or (
self.nmt_hyperplane is None):
nmt_feat_compliance = True
else:
x_diff = np.abs(x2 - x1)
dot_prod = np.dot(self.nmt_hyperplane, x_diff)
nmt_feat_compliance = dot_prod >= -TOL
# return result
if nmt_feat_compliance:
return comp
else: # incomparable due to nmt features
return -99
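# Minimal usage sketch (added for illustration; not in the original module): compare two
# points under feature 1 increasing and feature 2 decreasing, with no non-monotone
# hyperplane. The second point dominates the first, so compare() returns +1.
def _example_mono_comparator():
    comparator = MonoComparator(n_feats=2, incr_feats=[1], decr_feats=[2])
    x1 = np.array([0.0, 1.0])
    x2 = np.array([1.0, 0.0])
    return comparator.compare(x1, x2)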
class MonoLearner():
def __init__(
self,
n_feats,
incr_feats,
decr_feats,
coefs=None,
dirn=None,
x_base=None,
nmt_hyperplane=None,
learner_type='two-sided',
loss='rmse'):
self.incr_feats = np.asarray(incr_feats)
self.decr_feats = np.asarray(decr_feats)
self.coefs = coefs
self.dirn = dirn
self.x_base = x_base
self.intercept_=0.
self.mt_feats = np.asarray(list(incr_feats) + list(decr_feats))
self.nmt_feats = np.asarray(
[j for j in np.arange(n_feats) + 1 if j not in self.mt_feats])
self.mt_feat_types=np.zeros(n_feats,dtype=np.float64)
if len(self.incr_feats)>0:
self.mt_feat_types[self.incr_feats-1]=+1.
if len(self.decr_feats)>0:
self.mt_feat_types[self.decr_feats-1]=-1.
self.comparator = MonoComparator(
n_feats, incr_feats, decr_feats, nmt_hyperplane)
self.nmt_hyperplane = nmt_hyperplane
self.learner_type_code = 0 if learner_type == 'two-sided' else 1
# note loss only affects the calculation of the coefficients - all
# splits are done RMSE
self.loss = loss
@property
def nmt_hyperplane(self):
"""I'm the 'x' property."""
return self.comparator.nmt_hyperplane
@nmt_hyperplane.setter
def nmt_hyperplane(self, value):
self.comparator.nmt_hyperplane = value
def get_comparable_points(self,X):
intercepts=np.asarray([self.intercept_],dtype=np.float64)
if len(X.shape)<2:
X_=np.asarray(X.reshape([1,-1]),dtype=np.float64)
else:
X_=np.asarray(X,dtype=np.float64)
X_base_pts_=np.asarray(self.x_base.reshape([1,-1]),dtype=np.float64)
nmt_hps_=np.asarray(self.nmt_hyperplane.reshape([1,-1]),dtype=np.float64)
X_comp_pts=np.zeros([X_.shape[0],X_base_pts_.shape[0]],dtype=np.int32)
monoboost.apply_rules_c(X_,
X_base_pts_,
nmt_hps_,
intercepts,
self.mt_feat_types,
np.float64(self.dirn),
0,
X_comp_pts)
return X_comp_pts[:,0]==1
def decision_function(self, X_pred):
if len(X_pred.shape)==1:
X_pred_=np.asarray(X_pred.reshape([1,-1]),dtype=np.float64)
else:
X_pred_=np.asarray(X_pred,dtype=np.float64)
dirn=self.dirn
X_rule_transform_=np.zeros([X_pred.shape[0],1],dtype=np.int32)
monoboost.apply_rules_c(X_pred_,
np.asarray(self.x_base.reshape([1,-1]),dtype=np.float64),
np.asarray(self.nmt_hyperplane.reshape([1,-1]),dtype=np.float64),
np.asarray([self.intercept_],dtype=np.float64),
self.mt_feat_types,
np.float64(dirn),
0,
X_rule_transform_)
X_rule_transform_=X_rule_transform_.ravel()
is_comp=X_rule_transform_
y_pred = np.zeros(X_pred_.shape[0])
y_pred[X_rule_transform_==1]=self.coefs[1]
y_pred[X_rule_transform_==0]=self.coefs[0]
return [y_pred,is_comp]
# def predict_proba(self, X_pred):
# if len(X_pred.shape) == 1:
# X_pred_ = np.zeros([1, len(X_pred)])
# X_pred_[0, :] = X_pred
# else:
# X_pred_ = X_pred
#
# y_pred = np.zeros(X_pred_.shape[0])
# is_comp = np.zeros(X_pred_.shape[0])
# for i in np.arange(len(y_pred)):
# comp = self.comparator.compare(self.x_base, X_pred_[i, :])
# is_comp[i] = 1 if comp == 0 or comp == self.dirn else 0
# y_pred[i] = self.coefs[1] if (
# comp == 0 or comp == self.dirn) else self.coefs[0]
# return [y_pred, is_comp]
def fit_from_cache(
self,
cached_local_hp_data,
X,
y,
res_train,
curr_totals,
hp_reg=None,
hp_reg_c=None):
best = [1e99, -1, -99, -1, [-1, -1]] # err, base, dirn, hp, coefs
for i in np.arange(X.shape[0]):
data_i = cached_local_hp_data[i]
for dirn in [-1, +1]:
data_dirn = data_i[dirn]
vs = data_dirn['vs']
hps = data_dirn['hps']
comp_idxs = data_dirn['comp_idxs']
for i_v in np.arange(len(vs)):
comp_pts = comp_idxs[i_v]
incomp_pts = np.asarray(np.setdiff1d(
np.arange(X.shape[0]), comp_pts))
hp = hps[i_v, :]
res_comp_pts = res_train[comp_pts]
res_incomp_pts = res_train[incomp_pts]
mean_res_in = np.mean(res_comp_pts)
mean_res_out = np.mean(res_incomp_pts)
sse = np.sum((res_train[comp_pts] - mean_res_in)**2) + \
np.sum((res_train[incomp_pts] - mean_res_out)**2)
if hp_reg is not None and len(self.nmt_feats) > 0:
if hp_reg == 'L1_nmt' or hp_reg == 'L2_nmt':
sse = sse + hp_reg_c * \
np.linalg.norm(hp[self.nmt_feats - 1], ord=1
if hp_reg == 'L1_nmt' else
2)**(1 if hp_reg == 'L1_nmt'
else 2)
elif hp_reg == 'L1' or hp_reg == 'L2':
sse = sse + hp_reg_c * \
np.linalg.norm(hp, ord=1 if hp_reg == 'L1' else
2)**(1 if hp_reg == 'L1' else 2)
if sse <= best[0] and len(
comp_pts) > 0:
if self.loss == 'deviance':
sum_res_comp = np.sum(np.abs(res_comp_pts) * (
1 - np.abs(res_comp_pts)))
sum_res_incomp = np.sum(np.abs(res_incomp_pts) * (
1 - np.abs(res_incomp_pts)))
signed_sum_res_comp = np.sum(res_comp_pts)
signed_sum_res_incomp = np.sum(res_incomp_pts)
if (sum_res_comp > 1e-9 and
sum_res_incomp > 1e-9 and
np.abs(signed_sum_res_comp) > 1e-9 and
np.abs(signed_sum_res_incomp) > 1e-9):
coef_in = 0.5 * signed_sum_res_comp / \
(sum_res_comp)
if self.learner_type_code == 0: # two sided
coef_out = 0.5 * signed_sum_res_incomp / \
(sum_res_incomp)
ratio = np.max(
[np.abs(coef_in / coef_out),
np.abs(coef_out / coef_in)])
elif self.learner_type_code == 1: # one-sided
[coef_out, ratio] = [0., 0.5]
else:
coef_in = 0
coef_out = 0
ratio = 0.
elif self.loss == 'rmse':
#use_M-regression (huber loss)
use_huber=True
if use_huber:
q_alpha=0.5
q_in=np.percentile(np.abs(y[comp_pts] - curr_totals[comp_pts]),q_alpha)
res_in=y[comp_pts] - curr_totals[comp_pts]
median_in=np.median(res_in)
coef_in = median_in + (
1/len(comp_pts)*(np.sum(np.sign(res_in-
median_in)*np.min(np.hstack([q_in*np.ones(len(res_in)).reshape([-1,1]),np.abs(res_in-
median_in).reshape([-1,1])]),axis=1))))
if self.learner_type_code == 1:
coef_out=0
else:
q_out=np.percentile(np.abs(y[incomp_pts] - curr_totals[incomp_pts]),q_alpha)
res_out=y[incomp_pts] - curr_totals[incomp_pts]
median_out=np.median(res_out)
coef_out = median_out + (
1/len(incomp_pts)*(np.sum(np.sign(res_out-
median_out)*np.min(np.hstack([q_out*np.ones(len(res_out)).reshape([-1,1]),np.abs(res_out-
median_out).reshape([-1,1])]),axis=1))))
else:
coef_in = np.median(
y[comp_pts] - curr_totals[comp_pts])
coef_out = (0 if self.learner_type_code == 1 else
np.median(y[incomp_pts] -
curr_totals[incomp_pts]))
ratio = 0.
#if np.sign(coef_in) == dirn and np.sign(coef_out) == -dirn (
if coef_in*dirn > coef_out * dirn and (
coef_in != np.inf and coef_out != np.inf and
ratio < 1e9):
best = [
sse, i, dirn, hp, [
coef_out, coef_in]]
self.x_base = X[best[1], :]
self.coefs = best[4]
self.dirn = best[2]
self.nmt_hyperplane = best[3]
return self
def transform(self, X_pred_):
"""Transform dataset.
Parameters
----------
X: array-like matrix
Returns
-------
X_transformed: array-like matrix, shape=(n_samples, 1)
"""
res = np.asarray([1 if self.comparator.compare(self.x_base, X_pred_[
i, :]) * self.dirn in [0, 1] else 0 for i in
np.arange(X_pred_.shape[0])])
return res
class MonoBoost():
""" Partially Monotone Boosting classifier
var
Attributes
----------
eg_attr: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
XXX
See also
--------
YYY
"""
# NB: fit_algo is irrelevant for fit_type='quadratic'
def __init__(self,
n_feats,
incr_feats,
decr_feats,
num_estimators=10,
fit_algo='L2-one-class',
eta=1.,
vs=[0.001,
0.1,
0.25,
0.5,
1],
verbose=False,
hp_reg=None,
hp_reg_c=None,
incomp_pred_type='default',
learner_type='one-sided',
random_state=None,
standardise=True,
classes=None,
loss='auto'):
self.X = None
self.y = None
self.classes=classes
self.n_feats = n_feats
self.incr_feats = np.asarray(incr_feats)
self.decr_feats = np.asarray(decr_feats)
self.mt_feats = np.asarray(list(incr_feats) + list(decr_feats))
self.nmt_feats = np.asarray(
[j for j in np.arange(n_feats) + 1 if j not in self.mt_feats])
self.mt_feat_types=np.zeros(n_feats,dtype=np.float64)
if len(self.incr_feats)>0:
self.mt_feat_types[self.incr_feats-1]=+1.
if len(self.decr_feats)>0:
self.mt_feat_types[self.decr_feats-1]=-1.
self.fitted = False
self.standardise = standardise
self.fit_algo = fit_algo
self.eta = eta
self.num_estimators = num_estimators
self.vs = vs
self.mt_comparator = MonoComparator(
n_feats, incr_feats, decr_feats, nmt_hyperplane=None)
self.verbose = verbose
self.hp_reg = hp_reg
self.hp_reg_c = hp_reg_c
self.y_pred_num_comp_ = None
self.incomp_pred_type = incomp_pred_type
self.learner_type = learner_type
self.random_state = np.random.randint(
1e6) if random_state is None else random_state
np.random.seed(self.random_state)
self.loss = loss#'auto'
self.__estimators_base_pts__=None
self.__estimators_dirns__=None
self.__estimators_intercepts__=None
self.__estimators_hyperplanes__=None
def get_estimator_matrices(self):
if self.__estimators_base_pts__ is None:
self.__estimators_base_pts__={}
self.__estimators_dirns__={}
self.__estimators_hyperplanes__={}
self.__estimators_intercepts__={}
self.__estimators_coefs__={}
for k in self.ks:
self.__estimators_base_pts__[k]=np.asarray([est.x_base for est in self.estimators[k]],dtype=np.float64)
self.__estimators_dirns__[k]=np.asarray([est.dirn for est in self.estimators[k]],dtype=np.float64)
self.__estimators_hyperplanes__[k]=np.asarray([est.nmt_hyperplane for est in self.estimators[k]],dtype=np.float64)
self.__estimators_intercepts__[k]=np.asarray([est.intercept_ for est in self.estimators[k]],dtype=np.float64)
self.__estimators_coefs__[k]=np.asarray([est.coefs for est in self.estimators[k]],dtype=np.float64)
return [self.__estimators_base_pts__,
self.__estimators_dirns__,
self.__estimators_intercepts__,
self.__estimators_hyperplanes__,
self.__estimators_coefs__]
@property
def y_maj_class_calc(self):
"""I'm the 'x' property."""
return -1 if np.sum(self.y == -1) / len(self.y) >= 0.5 else +1
@property
def y_pred_num_comp(self):
"""I'm the 'x' property."""
if not hasattr(self, 'y_pred_num_comp_'):
self.y_pred_num_comp_ = None
if self.y_pred_num_comp_ is None:
[ypred, num_comp] = self.predict_proba(self.X)
self.y_pred_num_comp_ = num_comp
return self.y_pred_num_comp_
def solve_hp(self, incr_feats, decr_feats, delta_X, v, weights=None):
N = delta_X.shape[0]
p = delta_X.shape[1]
num_feats = p
mt_feats = np.asarray(list(incr_feats) + list(decr_feats))
nmt_feats = np.asarray(
[j for j in np.arange(num_feats) + 1 if j not in mt_feats])
solvers.options['show_progress'] = False
if N == 0:
return [-99]
else:
# Build QP matrices
# Minimize 1/2 x^T P x + q^T x
# Subject to G x <= h
# A x = b
if weights is None:
weights = np.ones(N)
P = np.zeros([p + N, p + N])
for ip in nmt_feats - 1:
P[ip, ip] = 1
q = 1 / (N * v) * np.ones((N + p, 1))
q[0:p, 0] = 0
q[p:, 0] = q[p:, 0] * weights
G1a = np.zeros([p, p])
for ip in np.arange(p):
G1a[ip, ip] = -1 if ip in mt_feats - 1 else 1
G1 = np.hstack([G1a, np.zeros([p, N])])
G2 = np.hstack([np.zeros([N, p]), -np.eye(N)])
G3 = np.hstack([delta_X, -np.eye(N)])
G = np.vstack([G1, G2, G3])
h = | np.zeros([p + 2 * N]) | numpy.zeros |
'''
COTR dataset
'''
import random
import time
import numpy as np
import torch
from torchvision.transforms import functional as tvtf
from torch.utils import data
#import cv2
import imutils
from COTR.datasets import tracking_datasets
from COTR.utils import debug_utils, utils, constants
import COTR.datasets.utils as tracking_utils
from torchvision.transforms import ColorJitter
from PIL import Image
def to_numpy(tensor):
if torch.is_tensor(tensor):
return tensor.detach().cpu().numpy()
elif type(tensor).__module__ != 'numpy':
raise ValueError("Cannot convert {} to numpy array"
.format(type(tensor)))
return tensor
def im_to_numpy(img):
img = to_numpy(img)
img = np.transpose(img, (1, 2, 0)) # H*W*C
return img
def im_to_torch(img):
img = np.transpose(img, (2, 0, 1)) # C*H*W
img = to_torch(img).float()
if img.max() > 1:
img /= 255
return img
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
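# Round-trip sketch (added for illustration; the shapes are arbitrary): an H*W*C float
# image becomes a C*H*W tensor scaled to [0, 1], and im_to_numpy restores the H*W*C layout.
def _example_image_roundtrip():
    img = np.arange(60, dtype=np.float32).reshape(4, 5, 3)
    tensor = im_to_torch(img)         # C*H*W, divided by 255 because img.max() > 1
    return im_to_numpy(tensor).shape  # (4, 5, 3)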
def crop(img, center, scale, res, rot=0):
img = im_to_numpy(img)
# Preprocessing for efficient cropping
ht, wd = img.shape[0], img.shape[1]
sf = scale * 200.0 / res[0]
if sf < 2:
sf = 1
else:
new_size = int(np.math.floor(max(ht, wd) / sf))
new_ht = int(np.math.floor(ht / sf))
new_wd = int( | np.math.floor(wd / sf) | numpy.math.floor |
import numpy as np
# power for Manning's hydraulic radius term
mpow = 2.0 / 3.0
def calculate_rectchan_mannings_discharge(
conversion_factor, roughness, slope, width, depth
):
"""
Calculate Manning's discharge for a rectangular channel.
"""
area = width * depth
return conversion_factor * area * depth ** mpow * slope ** 0.5 / roughness
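# Quick sanity-check sketch (added for illustration; the channel dimensions are made up):
# discharge of a 10 m wide, 2 m deep rectangular channel in SI units, so the Manning
# conversion factor is 1.0.
def _example_rectchan_discharge():
    return calculate_rectchan_mannings_discharge(
        conversion_factor=1.0, roughness=0.03, slope=0.001, width=10.0, depth=2.0
    )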
# n-point cross-section functions
def get_wetted_station(
x0,
x1,
h0,
h1,
depth,
):
"""Get the wetted length in the x-direction"""
# -- calculate the minimum and maximum depth
hmin = min(h0, h1)
hmax = max(h0, h1)
# -- if depth is less than or equal to the minimum value the
# station length (xlen) is zero
if depth <= hmin:
x1 = x0
# -- if depth is between hmin and hmax, station length is less
# than h1 - h0
elif depth < hmax:
xlen = x1 - x0
dlen = h1 - h0
if abs(dlen) > 0.0:
slope = xlen / dlen
else:
slope = 0.0
if h0 > h1:
dx = (depth - h1) * slope
xt = x1 + dx
xt0 = xt
xt1 = x1
else:
dx = (depth - h0) * slope
xt = x0 + dx
xt0 = x0
xt1 = xt
x0 = xt0
x1 = xt1
return x0, x1
def get_wetted_perimeter(
x0,
x1,
h0,
h1,
depth,
):
# -- calculate the minimum and maximum depth
hmin = min(h0, h1)
hmax = max(h0, h1)
# -- calculate the wetted perimeter for the segment
xlen = x1 - x0
if xlen > 0.0:
if depth > hmax:
dlen = hmax - hmin
else:
dlen = depth - hmin
else:
if depth > hmin:
dlen = min(depth, hmax) - hmin
else:
dlen = 0.0
return np.sqrt(xlen ** 2.0 + dlen ** 2.0)
def get_wetted_area(x0, x1, h0, h1, depth):
# -- calculate the minimum and maximum depth
hmin = min(h0, h1)
hmax = max(h0, h1)
# -- calculate the wetted area for the segment
xlen = x1 - x0
area = 0.0
if xlen > 0.0:
# -- add the area above hmax
if depth > hmax:
area = xlen * (depth - hmax)
# -- add the area below hmax
if hmax != hmin and depth > hmin:
area += 0.5 * (depth - hmin)
return area
def wetted_area(
x,
h,
depth,
verbose=False,
):
area = 0.0
if x.shape[0] == 1:
area = x[0] * depth
else:
for idx in range(0, x.shape[0] - 1):
x0, x1 = x[idx], x[idx + 1]
h0, h1 = h[idx], h[idx + 1]
# get station data
x0, x1 = get_wetted_station(x0, x1, h0, h1, depth)
# get wetted area
a = get_wetted_area(x0, x1, h0, h1, depth)
area += a
# write to screen
if verbose:
print(
f"{idx}->{idx + 1} ({x0},{x1}) - "
f"perimeter={x1 - x0} - area={a}"
)
return area
def wetted_perimeter(
x,
h,
depth,
verbose=False,
):
perimeter = 0.0
if x.shape[0] == 1:
perimeter = x[0]
else:
for idx in range(0, x.shape[0] - 1):
x0, x1 = x[idx], x[idx + 1]
h0, h1 = h[idx], h[idx + 1]
# get station data
x0, x1 = get_wetted_station(x0, x1, h0, h1, depth)
# get wetted perimeter
perimeter += get_wetted_perimeter(x0, x1, h0, h1, depth)
# write to screen
if verbose:
print(f"{idx}->{idx + 1} ({x0},{x1}) - perimeter={x1 - x0}")
return perimeter
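# Illustrative sketch (not in the original module): wetted area and perimeter of a simple
# rectangular cross-section described by n-point station/elevation arrays. For a
# 10-unit-wide rectangle flowing 1 unit deep this returns (10.0, 12.0).
def _example_wetted_geometry():
    x = np.array([0.0, 0.0, 10.0, 10.0])
    h = np.array([2.0, 0.0, 0.0, 2.0])
    depth = 1.0
    return wetted_area(x, h, depth), wetted_perimeter(x, h, depth)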
def manningsq(
x,
h,
depth,
roughness=0.01,
slope=0.001,
conv=1.0,
):
if isinstance(roughness, float):
roughness = | np.ones(x.shape, dtype=float) | numpy.ones |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
import logging
import os
import time
import numpy as np
import numpy.ma as ma
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.nn import functional as F
import cv2
from utils.utils import AverageMeter
from utils.utils import get_confusion_matrix
from utils.utils import adjust_learning_rate
from utils.utils import get_world_size, get_rank
def reduce_tensor(inp):
"""
Reduce the loss from all processes so that
process with rank 0 has the averaged results.
"""
world_size = get_world_size()
if world_size < 2:
return inp
with torch.no_grad():
reduced_inp = inp
dist.reduce(reduced_inp, dst=0)
return reduced_inp
def train(config, epoch, num_epoch, epoch_iters, base_lr, num_iters,
trainloader, optimizer, model, writer_dict, device):
# Training
model.train()
batch_time = AverageMeter()
ave_loss = AverageMeter()
ave_loss1 = AverageMeter()
ave_aux_loss = AverageMeter()
ave_error_loss = AverageMeter()
ave_loss2 = AverageMeter()
ave_aux_loss_2 = AverageMeter()
ave_error_loss_2 = AverageMeter()
tic = time.time()
cur_iters = epoch*epoch_iters
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
rank = get_rank()
world_size = get_world_size()
for i_iter, batch in enumerate(trainloader):
images, labels, boundary_gt, _, _ = batch
images = images.to(device)
labels = labels.long().to(device)
boundary_gt = boundary_gt.to(device)
losses, aux_loss, error_loss, losses_2, aux_loss_2, error_loss_2, _ = model(images, labels, boundary_gt.float())
# print('pred', pred[2].size())
loss = losses.mean() + 0.4 * aux_loss.mean() + 4 * error_loss.mean() + losses_2.mean() + 0.4 * aux_loss_2.mean() + 4 * error_loss_2.mean()
reduced_loss = reduce_tensor(loss)
loss1 = reduce_tensor(losses)
aux_loss = reduce_tensor(aux_loss)
error_losses = reduce_tensor(error_loss)
loss2 = reduce_tensor(losses_2)
aux_loss_2 = reduce_tensor(aux_loss_2)
error_losses_2 = reduce_tensor(error_loss_2)
model.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - tic)
tic = time.time()
# update average loss
ave_loss.update(reduced_loss.item())
ave_loss1.update(loss1.item())
ave_aux_loss.update(aux_loss.item())
ave_error_loss.update(error_losses.item())
ave_loss2.update(loss2.item())
ave_aux_loss_2.update(aux_loss_2.item())
ave_error_loss_2.update(error_losses_2.item())
lr = adjust_learning_rate(optimizer,
base_lr,
num_iters,
i_iter+cur_iters)
if i_iter % config.PRINT_FREQ == 0 and rank == 0:
print_loss = ave_loss.average() / world_size
print_loss1 = ave_loss1.average() / world_size
print_loss_aux = ave_aux_loss.average() / world_size
print_error_loss = ave_error_loss.average() / world_size
print_loss2 = ave_loss2.average() / world_size
print_loss_aux_2 = ave_aux_loss_2.average() / world_size
print_error_loss_2 = ave_error_loss_2.average() / world_size
msg = 'Epoch: [{}/{}] Iter:[{}/{}], Time: {:.2f}, ' \
'lr: {:.6f}, Loss: {:.6f}, Loss_1: {:.6f}, Loss_aux: {:.6f}, error_loss: {:.6f}, Loss_2: {:.6f}, Loss_aux_2: {:.6f}, error_loss_2: {:.6f}' .format(
epoch, num_epoch, i_iter, epoch_iters,
batch_time.average(), lr, print_loss, print_loss1, print_loss_aux, print_error_loss, print_loss2, print_loss_aux_2, print_error_loss_2)
logging.info(msg)
writer.add_scalar('train_loss', print_loss, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
def validate(config, testloader, model, writer_dict, device):
rank = get_rank()
world_size = get_world_size()
model.eval()
ave_loss = AverageMeter()
confusion_matrix = np.zeros(
(config.DATASET.NUM_CLASSES, config.DATASET.NUM_CLASSES))
confusion_matrix_sum = np.zeros(
(config.DATASET.NUM_CLASSES, config.DATASET.NUM_CLASSES))
with torch.no_grad():
for _, batch in enumerate(testloader):
image, label, boundary_gt, _, _ = batch
size = label.size()
image = image.to(device)
boundary_gt = boundary_gt.to(device)
label = label.long().to(device)
losses, aux_loss, error_loss, losses_2, aux_loss_2, error_loss_2, preds = model(image, label, boundary_gt.float())
pred = F.upsample(input=preds[0], size=(
size[-2], size[-1]), mode='bilinear')
loss = (losses + 0.4 * aux_loss + 4 * error_loss + losses_2 + 0.4 * aux_loss_2 + 4 * error_loss_2).mean()
reduced_loss = reduce_tensor(loss)
ave_loss.update(reduced_loss.item())
confusion_matrix += get_confusion_matrix(
label,
pred,
size,
config.DATASET.NUM_CLASSES,
config.TRAIN.IGNORE_LABEL)
confusion_matrix = torch.from_numpy(confusion_matrix).to(device)
reduced_confusion_matrix = reduce_tensor(confusion_matrix)
confusion_matrix = reduced_confusion_matrix.cpu().numpy()
pos = confusion_matrix.sum(1)
res = confusion_matrix.sum(0)
tp = np.diag(confusion_matrix)
IoU_array = (tp / np.maximum(1.0, pos + res - tp))
mean_IoU = IoU_array.mean()
print_loss = ave_loss.average()/world_size
if rank == 0:
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
writer.add_scalar('valid_loss', print_loss, global_steps)
writer.add_scalar('valid_mIoU', mean_IoU, global_steps)
writer_dict['valid_global_steps'] = global_steps + 1
# cv2.imwrite(str(global_steps)+'_boundary.png', (preds[0][0][0].data.cpu().numpy()*255).astype(np.uint8))
# cv2.imwrite(str(global_steps) + '_error.png', (preds[2][0][0].data.cpu().numpy() * 255).astype(np.uint8))
cv2.imwrite(str(global_steps) + '_error.png', (preds[2][0][0].data.cpu().numpy() * 255).astype(np.uint8))
return print_loss, mean_IoU, IoU_array
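# Worked numpy sketch (illustration only; the confusion matrix values are invented):
# how per-class IoU and mean IoU fall out of a confusion matrix, mirroring the
# computation used in validate() and testval().
def _example_miou_from_confusion_matrix():
    confusion_matrix = np.array([[50., 10.], [5., 35.]])
    pos = confusion_matrix.sum(1)
    res = confusion_matrix.sum(0)
    tp = np.diag(confusion_matrix)
    IoU_array = tp / np.maximum(1.0, pos + res - tp)
    return IoU_array.mean()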
def testval(config, test_dataset, testloader, model,
sv_dir='', sv_pred=False):
model.eval()
confusion_matrix = np.zeros(
(config.DATASET.NUM_CLASSES, config.DATASET.NUM_CLASSES))
with torch.no_grad():
for index, batch in enumerate(tqdm(testloader)):
image, label, boundary_gt, _, name = batch
size = label.size()
pred = test_dataset.multi_scale_inference(
model,
image,
scales=config.TEST.SCALE_LIST,
flip=config.TEST.FLIP_TEST)
if pred.size()[-2] != size[-2] or pred.size()[-1] != size[-1]:
pred = F.upsample(pred, (size[-2], size[-1]),
mode='bilinear')
confusion_matrix += get_confusion_matrix(
label,
pred,
size,
config.DATASET.NUM_CLASSES,
config.TRAIN.IGNORE_LABEL)
if sv_pred:
sv_path = os.path.join(sv_dir,'test_val_results')
if not os.path.exists(sv_path):
os.mkdir(sv_path)
test_dataset.save_pred(pred, sv_path, name)
if index % 100 == 0:
logging.info('processing: %d images' % index)
pos = confusion_matrix.sum(1)
res = confusion_matrix.sum(0)
tp = np.diag(confusion_matrix)
IoU_array = (tp / np.maximum(1.0, pos + res - tp))
mean_IoU = IoU_array.mean()
logging.info('mIoU: %.4f' % (mean_IoU))
pos = confusion_matrix.sum(1)
res = confusion_matrix.sum(0)
tp = np.diag(confusion_matrix)
pixel_acc = tp.sum()/pos.sum()
mean_acc = (tp/np.maximum(1.0, pos)).mean()
IoU_array = (tp / | np.maximum(1.0, pos + res - tp) | numpy.maximum |
# This example demonstrates how to apply the projection model in calibration
import numpy
numpy.set_printoptions(precision=5, suppress=True)
from camera_model import *
from slab_projection import *
from calibration import *
# Noise level in pixels
sigma = 0.1
# Slab parameters
elevation, azimuth = 0.5, 0.0
tau = 0.006
nu = 1.55
# Camera parameters
K = numpy.array([[1200.0, 0.0, 600.0], [0.0, 1200.0, 400.0], [0.0, 0.0, 1.0]])
r1 = -0.3
r2 = -0.2
rotX, rotY, rotZ = 0.0, 0.0, 0.0
t = numpy.array([0.0, 0.0, 0.0])
# Camera model
reference_camera = CameraModel(Pixel(K), RadialDistortion(r1, r2),
ApproximateSlabProjection(elevation, azimuth, tau, nu),
EulerPose(rotX, rotY, rotZ, t))
calibrated_camera = CameraModel(Pixel(numpy.diag([1000.0, 1000.0, 1.0])), RadialDistortion(0.0, 0.0),
ApproximateSlabProjection(0.0, 0.0, 0.006, 1.55),
EulerPose(rotX, rotY, rotZ, t))
calibrated_camera = CameraModel(Pixel(K), RadialDistortion(r1, r2),
ApproximateSlabProjection(0.0, 0.0, 0.01, 1.0),
EulerPose(0.0, 0.0, 0.0, numpy.array([0.0, 0.0, 0.0])))
space = []
image = []
for x in numpy.arange(-2.0, 3.0, 1.0):
for y in | numpy.arange(-2.0, 3.0, 1.0) | numpy.arange |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 16 15:18:08 2021.
@author: <NAME>
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['DejaVu Sans']})
rc('text',usetex=True)
params={'text.latex.preamble':[r'\usepackage{amsmath}',r'\usepackage{amssymb}']}
plt.rcParams.update(params);
from scipy import stats
# --------------------------------------------------------------------------- #
model_name = 'resnet18'#'resnet34'#'efficientnetb0'#'densenet121'#
model_name_str = 'ResNet-18'# 'ResNet-34'#'EfficientNet-B0'#'DenseNet-121'#'
splitLayer ='add_3'#'block2b_add'#'pool2_conv'#'add_1'#
rowsPerPacket = 4
quant1 = 8
quant2 = 8
results_dir = 'simData'
dataset = 'largeTest'
output_dir = os.path.join('mc_results',dataset,model_name+'_'+splitLayer)
os.makedirs(output_dir,exist_ok=True)
file_str = 'GC_'+str(quant1)+'Bits_'+str(quant2)+'Bits_rpp_'+str(rowsPerPacket)+'_MC_'
# --------------------------------------------------------------------------- #
lossProbability = [0.3,0.2,0.1,0.01]
burstLength = [1,2,3,4,5,6,7] # [1,2,3,4,5,6,7,8,9,10,11,12,13,14] #
step_MC = 5
num_MC = 20
MC_index = [i for i in range(0,num_MC+step_MC,step_MC)]
cloud_top1_acc = np.zeros([len(lossProbability),len(burstLength),num_MC],dtype=np.float64)
cloud_top5_acc = np.zeros_like(cloud_top1_acc)
full_top1_acc = np.zeros_like(cloud_top1_acc)
full_top5_acc = np.zeros_like(cloud_top1_acc)
caltec_top1_acc = np.zeros_like(cloud_top1_acc)
caltec_top5_acc = np.zeros_like(caltec_top1_acc)
altec_top1_acc = np.zeros_like(caltec_top1_acc)
altec_top5_acc = np.zeros_like(altec_top1_acc)
halrtc_top1_acc = np.zeros_like(caltec_top1_acc)
halrtc_top5_acc = np.zeros_like(caltec_top5_acc)
silrtc_top1_acc = np.zeros_like(halrtc_top1_acc)
silrtc_top5_acc = np.zeros_like(silrtc_top1_acc)
ns_top1_acc = np.zeros_like(caltec_top1_acc)
ns_top5_acc = np.zeros_like(caltec_top5_acc)
cloud_top1_mean = np.zeros([len(lossProbability),len(burstLength)],dtype=np.float64)
cloud_top5_mean = np.zeros_like(cloud_top1_mean)
full_top1_mean = np.zeros_like(cloud_top1_mean)
full_top5_mean = np.zeros_like(cloud_top1_mean)
caltec_top1_mean = np.zeros_like(cloud_top1_mean)
caltec_top5_mean = np.zeros_like(cloud_top5_mean)
altec_top1_mean = np.zeros_like(cloud_top1_mean)
altec_top5_mean = np.zeros_like(cloud_top5_mean)
halrtc_top1_mean = np.zeros_like(caltec_top1_mean)
halrtc_top5_mean = np.zeros_like(caltec_top1_mean)
silrtc_top1_mean = np.zeros_like(caltec_top1_mean)
silrtc_top5_mean = np.zeros_like(caltec_top1_mean)
ns_top1_mean = np.zeros_like(caltec_top1_mean)
ns_top5_mean = np.zeros_like(caltec_top5_mean)
# --------------------------------------------------------------------------- #
for i_lp in range(len(lossProbability)):
for i_bl in range(len(burstLength)):
print(f'loss probability {lossProbability[i_lp]} burst length {burstLength[i_bl]}')
df_results = pd.read_csv(os.path.join(results_dir,dataset,model_name,splitLayer+'_lp_'+str(lossProbability[i_lp])+'_Bl_'+str(burstLength[i_bl]),file_str+str(MC_index[0])+'_'+str(MC_index[-1])+'_.csv'))
full_top1_acc[i_lp,i_bl,:] = df_results['full_top1_accuracy'].to_numpy()
cloud_top1_acc[i_lp,i_bl,:] = df_results['cloud_top1_accuracy'].to_numpy()
full_top5_acc[i_lp,i_bl,:] = df_results['full_top5_accuracy'].to_numpy()
cloud_top5_acc[i_lp,i_bl,:] = df_results['cloud_top5_accuracy'].to_numpy()
full_top1_mean[i_lp,i_bl] = np.mean(full_top1_acc[i_lp,i_bl,:])
full_top5_mean[i_lp,i_bl] = np.mean(full_top5_acc[i_lp,i_bl,:])
cloud_top1_mean[i_lp,i_bl] = np.mean(cloud_top1_acc[i_lp,i_bl,:])
cloud_top5_mean[i_lp,i_bl] = np.mean(cloud_top5_acc[i_lp,i_bl,:])
for i_mc in range(len(MC_index)-1):
df_results = pd.read_csv(os.path.join(results_dir,dataset,model_name,splitLayer+'_lp_'+str(lossProbability[i_lp])+'_Bl_'+str(burstLength[i_bl]),file_str+str(MC_index[i_mc])+'_'+str(MC_index[i_mc+1])+'_CALTeC.csv'))
caltec_top1_acc[i_lp,i_bl,MC_index[i_mc]:MC_index[i_mc+1]] = df_results['mc_repaired_top1_accuracy'].to_numpy()
caltec_top5_acc[i_lp,i_bl,MC_index[i_mc]:MC_index[i_mc+1]] = df_results['mc_repaired_top5_accuracy'].to_numpy()
df_results = pd.read_csv(os.path.join(results_dir,dataset,model_name,splitLayer+'_lp_'+str(lossProbability[i_lp])+'_Bl_'+str(burstLength[i_bl]),file_str+str(MC_index[i_mc])+'_'+str(MC_index[i_mc+1])+'_ALTeC.csv'))
altec_top1_acc[i_lp,i_bl,MC_index[i_mc]:MC_index[i_mc+1]] = df_results['mc_repaired_top1_accuracy'].to_numpy()
altec_top5_acc[i_lp,i_bl,MC_index[i_mc]:MC_index[i_mc+1]] = df_results['mc_repaired_top5_accuracy'].to_numpy()
df_results = pd.read_csv(os.path.join(results_dir,dataset,model_name,splitLayer+'_lp_'+str(lossProbability[i_lp])+'_Bl_'+str(burstLength[i_bl]),file_str+str(MC_index[i_mc])+'_'+str(MC_index[i_mc+1])+'_HaLRTC.csv'))
halrtc_top1_acc[i_lp,i_bl,MC_index[i_mc]:MC_index[i_mc+1]] = df_results['mc_repaired_top1_accuracy'].to_numpy()
halrtc_top5_acc[i_lp,i_bl,MC_index[i_mc]:MC_index[i_mc+1]] = df_results['mc_repaired_top5_accuracy'].to_numpy()
df_results = pd.read_csv(os.path.join(results_dir,dataset,model_name,splitLayer+'_lp_'+str(lossProbability[i_lp])+'_Bl_'+str(burstLength[i_bl]),file_str+str(MC_index[i_mc])+'_'+str(MC_index[i_mc+1])+'_SiLRTC.csv'))
silrtc_top1_acc[i_lp,i_bl,MC_index[i_mc]:MC_index[i_mc+1]] = df_results['mc_repaired_top1_accuracy'].to_numpy()
silrtc_top5_acc[i_lp,i_bl,MC_index[i_mc]:MC_index[i_mc+1]] = df_results['mc_repaired_top5_accuracy'].to_numpy()
df_results = pd.read_csv(os.path.join(results_dir,dataset,model_name,splitLayer+'_lp_'+str(lossProbability[i_lp])+'_Bl_'+str(burstLength[i_bl]),file_str+str(MC_index[i_mc])+'_'+str(MC_index[i_mc+1])+'_InpaintNS.csv'))
ns_top1_acc[i_lp,i_bl,MC_index[i_mc]:MC_index[i_mc+1]] = df_results['mc_repaired_top1_accuracy'].to_numpy()
ns_top5_acc[i_lp,i_bl,MC_index[i_mc]:MC_index[i_mc+1]] = df_results['mc_repaired_top5_accuracy'].to_numpy()
caltec_top1_mean[i_lp,i_bl] = np.mean(caltec_top1_acc[i_lp,i_bl,:])
caltec_top5_mean[i_lp,i_bl] = np.mean(caltec_top5_acc[i_lp,i_bl,:])
altec_top1_mean[i_lp,i_bl] = np.mean(altec_top1_acc[i_lp,i_bl,:])
altec_top5_mean[i_lp,i_bl] = np.mean(altec_top5_acc[i_lp,i_bl,:])
halrtc_top1_mean[i_lp,i_bl] = np.mean(halrtc_top1_acc[i_lp,i_bl,:])
halrtc_top5_mean[i_lp,i_bl] = np.mean(halrtc_top5_acc[i_lp,i_bl,:])
silrtc_top1_mean[i_lp,i_bl] = np.mean(silrtc_top1_acc[i_lp,i_bl,:])
silrtc_top5_mean[i_lp,i_bl] = np.mean(silrtc_top5_acc[i_lp,i_bl,:])
ns_top1_mean[i_lp,i_bl] = np.mean(ns_top1_acc[i_lp,i_bl,:])
ns_top5_mean[i_lp,i_bl] = np.mean(ns_top5_acc[i_lp,i_bl,:])
# --------------------------------------------------------------------------- #
color_list = ['limegreen','crimson','dimgray','darkorange','mediumblue','magenta','cyan','darkviolet','saddlebrown','maroon']
marker_list = ['^','2','d','>','.','+','s','x','<','o','v']
chosen_fontsize = 12
for i_lp in range(len(lossProbability)):
metric_str = 'Top1'
    fig = plt.figure(i_lp)
    ax = fig.gca()
plt.rc('text',usetex=True)
params= {'legend.fontsize':chosen_fontsize,'legend.handlelength':2}
plt.rcParams.update(params)
plt.title(r'\textbf{Monte Carlo experiments with} \verb|%s| \verb|%s| \textbf{tensors with} $P_B = %s$' %(model_name_str,splitLayer,str(lossProbability[i_lp])),fontsize=chosen_fontsize)
plt.ylim(-0.05,1.05)
plt.plot(burstLength,full_top1_mean[i_lp,:],marker=marker_list[0],color=color_list[0],label=r'Top-1 w/ No Loss')
plt.plot(burstLength,cloud_top1_mean[i_lp,:],marker=marker_list[1],color=color_list[1],label=r'Top-1 w/ No Completion')
plt.plot(burstLength,caltec_top1_mean[i_lp,:],marker=marker_list[2],color=color_list[2],label=r'Top-1 w/ CALTeC')
plt.plot(burstLength,altec_top1_mean[i_lp,:],marker=marker_list[3],color=color_list[3],label=r'Top-1 w/ ALTeC')
plt.plot(burstLength,halrtc_top1_mean[i_lp,:],marker=marker_list[4],color=color_list[4],label=r'Top-1 w/ HaLRTC')
plt.plot(burstLength,silrtc_top1_mean[i_lp,:],marker=marker_list[5],color=color_list[5],label=r'Top-1 w/ SiLRTC')
plt.plot(burstLength,ns_top1_mean[i_lp,:],marker=marker_list[6],color=color_list[6],label=r'Top-1 w/ Navier-Stokes')
plt.xticks(burstLength,fontsize=chosen_fontsize)
ax.set_xlabel(r'$L_B$',fontsize=chosen_fontsize)
ax.set_ylabel(r'\textbf{Accuracy}',fontsize=chosen_fontsize)
plt.yticks([0.,0.3,0.5,0.7,0.9],fontsize=chosen_fontsize)
plt.grid()
plt.legend(loc='best')
plt.tight_layout()
fig.savefig(os.path.join(output_dir,'rpp_'+str(rowsPerPacket)+'_lp_'+str(lossProbability[i_lp])+'_'+metric_str+'.pdf'))
fig.clf()
plt.close()
for i_lp in range(len(lossProbability)):
metric_str = 'Top5'
    fig = plt.figure(i_lp)
    ax = fig.gca()
plt.rc('text',usetex=True)
params= {'legend.fontsize':chosen_fontsize,'legend.handlelength':2}
plt.rcParams.update(params)
plt.title(r'\textbf{Monte Carlo experiments with} \verb|%s| \verb|%s| \textbf{tensors with} $P_B = %s$' %(model_name_str,splitLayer,str(lossProbability[i_lp])),fontsize=chosen_fontsize)
plt.ylim(-0.05,1.05)
plt.plot(burstLength,full_top5_mean[i_lp,:],linestyle='dashed',marker=marker_list[0],color=color_list[0],label=r'Top-5 w/ No Loss')
plt.plot(burstLength,cloud_top5_mean[i_lp,:],linestyle='dashed',marker=marker_list[1],color=color_list[1],label=r'Top-5 w/ No Completion')
plt.plot(burstLength,caltec_top5_mean[i_lp,:],linestyle='dashed',marker=marker_list[2],color=color_list[2],label=r'Top-5 w/ CALTeC')
plt.plot(burstLength,altec_top5_mean[i_lp,:],linestyle='dashed',marker=marker_list[3],color=color_list[3],label=r'Top-5 w/ ALTeC')
plt.plot(burstLength,halrtc_top5_mean[i_lp,:],linestyle='dashed',marker=marker_list[4],color=color_list[4],label=r'Top-5 w/ HaLRTC')
plt.plot(burstLength,silrtc_top5_mean[i_lp,:],linestyle='dashed',marker=marker_list[5],color=color_list[5],label=r'Top-5 w/ SiLRTC')
plt.plot(burstLength,ns_top5_mean[i_lp,:],linestyle='dashed',marker=marker_list[6],color=color_list[6],label=r'Top-5 w/ Navier-Stokes')
plt.xticks(burstLength,fontsize=chosen_fontsize)
ax.set_xlabel(r'$L_B$',fontsize=chosen_fontsize)
ax.set_ylabel(r'\textbf{Accuracy}',fontsize=chosen_fontsize)
plt.yticks([0.,0.3,0.5,0.7,0.9],fontsize=chosen_fontsize)
plt.grid()
plt.legend(loc='best')
plt.tight_layout()
fig.savefig(os.path.join(output_dir,'rpp_'+str(rowsPerPacket)+'_lp_'+str(lossProbability[i_lp])+'_'+metric_str+'.pdf'))
fig.clf()
plt.close()
# --------------------------------------------------------------------------- #
for i_lp in range(len(lossProbability)):
bl_top1_mean = np.mean(full_top1_acc[i_lp,:,:],axis=1)
bl_top1_std = np.std(full_top1_acc[i_lp,:,:],axis=1)
bl_top5_mean = np.mean(full_top5_acc[i_lp,:,:],axis=1)
bl_top5_std = np.std(full_top5_acc[i_lp,:,:],axis=1)
bl_nc_top1_mean = np.mean(cloud_top1_acc[i_lp,:,:],axis=1)
bl_nc_top1_std = np.std(cloud_top1_acc[i_lp,:,:],axis=1)
bl_nc_top5_mean = np.mean(cloud_top5_acc[i_lp,:,:],axis=1)
bl_nc_top5_std = np.std(cloud_top5_acc[i_lp,:,:],axis=1)
bl_caltec_top1_mean = np.mean(caltec_top1_acc[i_lp,:,:],axis=1)
bl_caltec_top1_std = np.std(caltec_top1_acc[i_lp,:,:],axis=1)
bl_caltec_top5_mean = np.mean(caltec_top5_acc[i_lp,:,:],axis=1)
bl_caltec_top5_std = np.std(caltec_top5_acc[i_lp,:,:],axis=1)
bl_altec_top1_mean = np.mean(altec_top1_acc[i_lp,:,:],axis=1)
bl_altec_top1_std = np.std(altec_top1_acc[i_lp,:,:],axis=1)
bl_altec_top5_mean = np.mean(altec_top5_acc[i_lp,:,:],axis=1)
bl_altec_top5_std = np.std(altec_top5_acc[i_lp,:,:],axis=1)
bl_halrtc_top1_mean = np.mean(halrtc_top1_acc[i_lp,:,:],axis=1)
bl_halrtc_top1_std = np.std(halrtc_top1_acc[i_lp,:,:],axis=1)
bl_halrtc_top5_mean = np.mean(halrtc_top5_acc[i_lp,:,:],axis=1)
bl_halrtc_top5_std = np.std(halrtc_top5_acc[i_lp,:,:],axis=1)
bl_silrtc_top1_mean = np.mean(silrtc_top1_acc[i_lp,:,:],axis=1)
bl_silrtc_top1_std = np.std(silrtc_top1_acc[i_lp,:,:],axis=1)
bl_silrtc_top5_mean = np.mean(silrtc_top5_acc[i_lp,:,:],axis=1)
bl_silrtc_top5_std = np.std(silrtc_top5_acc[i_lp,:,:],axis=1)
bl_ns_top1_mean = np.mean(ns_top1_acc[i_lp,:,:],axis=1)
bl_ns_top1_std = np.std(ns_top1_acc[i_lp,:,:],axis=1)
bl_ns_top5_mean = np.mean(ns_top5_acc[i_lp,:,:],axis=1)
bl_ns_top5_std = np.std(ns_top5_acc[i_lp,:,:],axis=1)
df_lp = pd.DataFrame({'NL_Top1_mean':bl_top1_mean,'NL_Top1_std':bl_top1_std,'NL_Top5_mean':bl_top5_mean,'NL_Top5_std':bl_top5_std,
'NC_Top1_mean':bl_nc_top1_mean,'NC_Top1_std':bl_nc_top1_std,'NC_Top5_mean':bl_nc_top5_mean,'NC_Top5_std':bl_nc_top5_std,
'CALTeC_Top1_mean':bl_caltec_top1_mean,'CALTeC_Top1_std':bl_caltec_top1_std,'CALTeC_Top5_mean':bl_caltec_top5_mean,'CALTeC_Top5_std':bl_caltec_top5_std,
'ALTeC_Top1_mean':bl_altec_top1_mean,'ALTeC_Top1_std':bl_altec_top1_std,'ALTeC_Top5_mean':bl_altec_top5_mean,'ALTeC_Top5_std':bl_altec_top5_std,
'HaLRTC_Top1_mean':bl_halrtc_top1_mean,'HaLRTC_Top1_std':bl_halrtc_top1_std,'HaLRTC_Top5_mean':bl_halrtc_top5_mean,'HaLRTC_Top5_std':bl_halrtc_top5_std,
'SiLRTC_Top1_mean':bl_silrtc_top1_mean,'SilRTC_Top1_std':bl_silrtc_top1_std,'SiLRTC_Top5_mean':bl_silrtc_top5_mean,'SiLRTC_Top5_std':bl_silrtc_top5_std,
'NS_Top1_mean':bl_ns_top1_mean,'NS_Top1_std':bl_ns_top1_std,'NS_Top5_mean':bl_ns_top5_mean,'NS_Top5_std':bl_ns_top5_std
})
df_lp.to_csv(os.path.join(output_dir,'ImgClass_MonteCarlo_rpp_'+str(rowsPerPacket)+'_lp_'+str(lossProbability[i_lp])+'.csv'))
# df_lp.to_latex(os.path.join(output_dir,'ImgClass_MonteCarlo'+'rpp_'+str(rowsPerPacket)+'_lp_'+str(lossProbability[i_lp])+'.tex'))
print('Calculating stats')
full_top1_acc_lp = np.reshape(full_top1_acc,(len(lossProbability),len(burstLength)*(num_MC)))
cloud_top1_acc_lp = np.reshape(cloud_top1_acc,(len(lossProbability),len(burstLength)*(num_MC)))
caltec_top1_lp = np.reshape(caltec_top1_acc,(len(lossProbability),len(burstLength)*(num_MC)))
altec_top1_lp = np.reshape(altec_top1_acc,(len(lossProbability),len(burstLength)*(num_MC)))
halrtc_top1_lp = np.reshape(halrtc_top1_acc,(len(lossProbability),len(burstLength)*(num_MC)))
silrtc_top1_lp = np.reshape(silrtc_top1_acc,(len(lossProbability),len(burstLength)*(num_MC)))
ns_top1_lp = np.reshape(ns_top1_acc,(len(lossProbability),len(burstLength)*(num_MC)))
full_top5_acc_lp = np.reshape(full_top5_acc,(len(lossProbability),len(burstLength)*(num_MC)))
cloud_top5_acc_lp = np.reshape(cloud_top5_acc,(len(lossProbability),len(burstLength)*(num_MC)))
caltec_top5_lp = np.reshape(caltec_top5_acc,(len(lossProbability),len(burstLength)*(num_MC)))
altec_top5_lp = np.reshape(altec_top5_acc,(len(lossProbability),len(burstLength)*(num_MC)))
halrtc_top5_lp = np.reshape(halrtc_top5_acc,(len(lossProbability),len(burstLength)*(num_MC)))
silrtc_top5_lp = np.reshape(silrtc_top5_acc,(len(lossProbability),len(burstLength)*(num_MC)))
ns_top5_lp = np.reshape(ns_top5_acc,(len(lossProbability),len(burstLength)*(num_MC)))
t_caltec_mean_top1 = np.mean(caltec_top1_lp,axis=1)
t_caltec_std_top1 = np.std(caltec_top1_lp,axis=1)
import numpy as np
import nose.tools as ntools
import hiddenmm.model.markov_chain as mc
import hiddenmm.model.discrete_hidden_mm as dhmm
import hiddenmm.constants as cnst
def test_successful_creation():
pi = np.array([0.1, 0.9])
a = np.array([
[0.1, 0.9],
[0.5, 0.5]
])
chain = mc.MarkovChain(pi, a)
b = np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.1, 0.1, 0.2, 0.3, 0.3]
])
dhmm.DiscreteHiddenMM(chain, b)
@ntools.raises(ValueError)
def test_dimension_mismatch():
pi = np.array([0.1, 0.9])
a = np.array([
[0.1, 0.9],
[0.5, 0.5]
])
chain = mc.MarkovChain(pi, a)
b = np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.1, 0.1, 0.2, 0.3, 0.3],
[0.1, 0.1, 0.2, 0.3, 0.3]
])
dhmm.DiscreteHiddenMM(chain, b)
@ntools.raises(ValueError)
def test_distribution_does_not_sum_up():
pi = np.array([0.1, 0.9])
a = np.array([
[0.1, 0.9],
[0.5, 0.5]
])
chain = mc.MarkovChain(pi, a)
b = np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.1, 0.1, 0.2, 0.3, 0.5]
])
dhmm.DiscreteHiddenMM(chain, b)
@ntools.raises(ValueError)
def test_distribution_negative():
pi = np.array([0.1, 0.9])
a = np.array([
[0.1, 0.9],
[0.5, 0.5]
])
chain = mc.MarkovChain(pi, a)
b = np.array([
[0.2, 0.2, 0.2, 0.6, -0.2],
[0.1, 0.1, 0.2, 0.3, 0.3],
])
dhmm.DiscreteHiddenMM(chain, b)
def test_generation_1():
pi = np.array([0.1, 0.9])
a = np.array([
[0.1, 0.9],
[0.5, 0.5]
])
chain = mc.MarkovChain(pi, a)
b = np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.1, 0.1, 0.2, 0.3, 0.3]
])
model = dhmm.DiscreteHiddenMM(chain, b)
result = model.generate(100)
assert result.shape[0] == 100
assert result.shape[1] == 2
def test_likelihood_summation():
pi = np.array([0.1, 0.9])
a = np.array([
[0.1, 0.9],
[0.5, 0.5]
])
chain = mc.MarkovChain(pi, a)
b = np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.1, 0.1, 0.2, 0.3, 0.3]
])
model = dhmm.DiscreteHiddenMM(chain, b)
summation = 0.0
for i in range(5):
for j in range(5):
for k in range(5):
for z in range(5):
observations = [i, j, k, z]
likelihood = model.likelihood(np.array(observations, dtype=int))
assert likelihood >= 0.0
summation += likelihood
assert np.abs(summation - 1.0) < cnst.EPSILON
def test_likelihood_basic():
pi = np.array([0.1, 0.9])
a = np.array([
[0.1, 0.9],
[0.5, 0.5]
])
chain = mc.MarkovChain(pi, a)
b = np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.1, 0.1, 0.2, 0.3, 0.3]
])
model = dhmm.DiscreteHiddenMM(chain, b)
assert np.abs(model.likelihood(np.array([0], dtype=int)) - 0.11) < cnst.EPSILON
assert np.abs(model.likelihood(np.array([1], dtype=int)) - 0.11) < cnst.EPSILON
assert np.abs(model.likelihood(np.array([2], dtype=int)) - 0.20) < cnst.EPSILON
assert np.abs(model.likelihood(np.array([3], dtype=int)) - 0.29) < cnst.EPSILON
assert np.abs(model.likelihood(np.array([4], dtype=int)) - 0.29) < cnst.EPSILON
def test_solve_for_state_constant_chain():
pi = np.array([0.0, 0.0, 1.0])
a = np.array([
[1.0, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5]
])
chain = mc.MarkovChain(pi, a)
b = np.array([
[0.4, 0.2, 0.2, 0.2, 0.0],
[0.1, 0.1, 0.2, 0.3, 0.3],
[0.05, 0.0, 0.0, 0.45, 0.5]
])
model = dhmm.DiscreteHiddenMM(chain, b)
assert np.allclose(model.solve_for_states(np.array([0, 1, 2, 3, 4])), np.array([2, 1, 1, 1, 1]))
def test_solve_for_state_list():
pi = np.array([1.0, 0.0, 0.0])
a = np.array([
[1.0, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5]
])
chain = mc.MarkovChain(pi, a)
b = np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.1, 0.1, 0.2, 0.3, 0.3],
[0.0, 0.0, 0.0, 0.5, 0.5]
])
model = dhmm.DiscreteHiddenMM(chain, b)
assert np.allclose(model.solve_for_states(np.array([0, 1, 2, 3, 4])), 0)
@ntools.raises(ValueError)
def test_solve_for_state_impossible_observation():
pi = np.array([0.0, 0.0, 1.0])
a = np.array([
[1.0, 0.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 0.5, 0.5]
])
chain = mc.MarkovChain(pi, a)
b = np.array([
[0.2, 0.2, 0.2, 0.2, 0.2],
[0.1, 0.1, 0.2, 0.3, 0.3],
[0.0, 0.0, 0.0, 0.5, 0.5]
])
model = dhmm.DiscreteHiddenMM(chain, b)
model.solve_for_states(np.array([0, 1, 2, 3, 4]))
def test_fit_single():
    pi = np.array([0.3, 0.4, 0.3])
import sys
from collections import defaultdict
from handlers.FastqReader import FastqReader
from handlers.FastaHandler import FastaHandler
from matplotlib import pyplot
import numpy
'''
Iterate a fastq file and find the read length distribution, as well as cumulative number of reads above thresholds
'''
# READS_PATH = "/home/ryan/data/Nanopore/ecoli/miten/guppy/subsampled/30x/r94_ec_rad2.30x.fastq"
# READS_PATH = "/home/ryan/data/Nanopore/ecoli/miten/guppy/r94_ec_guppy.first50k.fastq"
# READS_PATH = "/home/ryan/data/Nanopore/ecoli/miten/guppy/r94_ec_guppy.fastq"
READS_PATH = "/home/ryan/data/Nanopore/Human/paolo/LC2019/shasta_assembly_GM24143.fasta"
# READS_PATH = "/home/ryan/data/Nanopore/ecoli/flapppie/03_22_19_R941_gEcoli_first_410k.fastq"
# READS_PATH = "/home/ryan/data/Nanopore/ecoli/flapppie/03_22_19_R941_gEcoli_last_410k.fastq"
# READS_PATH = "/home/ryan/Downloads/r94_ec_rad2.30x.fastq"
def plot_length_distribution(step, bins, frequencies):
axes = pyplot.axes()
center = (bins[:-1] + bins[1:]) / 2
axes.bar(center, frequencies, width=step, align="center")
axes.set_xlabel("Read length (bp)")
axes.set_ylabel("Frequency")
pyplot.show()
pyplot.close()
def print_stats(step, frequencies, n_reads):
print(READS_PATH.strip().split("/")[-1])
print("\t\t\t\tn\tproportion")
for threshold in [10000,20000,30000]:
index = threshold/step - 1
index = int(index)
right_side_sum = numpy.sum(frequencies[index:])
proportion = right_side_sum/n_reads
# number of reads greater than "threshold"
print("reads greater than %d:\t%d\t%.3f" % (threshold, right_side_sum, proportion))
print("n reads total (all lengths):\t%d"%n_reads)
def main():
if READS_PATH.endswith(".fastq"):
reads = FastqReader().iterate_file(path=READS_PATH)
elif READS_PATH.endswith(".fasta"):
reads = FastaHandler(READS_PATH).iterate_file()
else:
exit("Improper file format: %s" % READS_PATH)
n_reads = 0
lengths = list()
length_sum = 0
for i, item in enumerate(reads):
n_reads += 1
if READS_PATH.endswith(".fastq"):
header, sequence, quality = item
elif READS_PATH.endswith(".fasta"):
header, sequence = item
# print()
# print(header)
# print(sequence[:30])
# print(quality[:30])
lengths.append(len(sequence))
length_sum += len(sequence)
sys.stdout.write("\r%d"%i)
print()
# ---- Plotting ----
step = 500 # bin size
max_length = 50000 # end of histogram
    bins = numpy.arange(0, max_length + step, step=step)
# encoding: utf-8
#
# @Author: <NAME>, <NAME>
# @Date: Nov 15, 2021
# @Filename: ism.py
# @License: BSD 3-Clause
# @Copyright: <NAME>, <NAME>
import os.path
from astropy import units as u
from astropy import constants as c
import numpy as np
from astropy.io import fits, ascii
from astropy.table import Table
from scipy.special import sph_harm
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy.coordinates import SkyCoord
from astropy.modeling.models import Sersic2D
from dataclasses import dataclass
import sys
if (sys.version_info[0]+sys.version_info[1]/10.) < 3.8:
from backports.cached_property import cached_property
else:
from functools import cached_property
from scipy.ndimage.interpolation import map_coordinates
from scipy.interpolate import interp1d, interp2d
import lvmdatasimulator
from lvmdatasimulator import log
import progressbar
from joblib import Parallel, delayed
from astropy.convolution import convolve_fft, kernels
from lvmdatasimulator.utils import calc_circular_mask, convolve_array, set_default_dict_values, \
ism_extinction, check_overlap, assign_units
fluxunit = u.erg / (u.cm ** 2 * u.s * u.arcsec ** 2)
velunit = u.km / u.s
def brightness_inhomogeneities_sphere(harm_amplitudes, ll, phi_cur, theta_cur, rho, med, radius, thickness):
"""
Auxiliary function producing the inhomogeneities on the brightness distribution for the Cloud of Bubble objects
using the spherical harmonics.
"""
brt = theta_cur * 0
for m in np.arange(-ll, ll + 1):
brt += (harm_amplitudes[m + ll * (ll + 1) - 1] * sph_harm(m, ll, phi_cur, theta_cur).real * med *
(1 - np.sqrt(abs(rho.value ** 2 / radius.value ** 2 - (1 - thickness / 2) ** 2))))
return brt
def sphere_brt_in_line(brt_3d, rad_3d, rad_model, flux_model):
"""
Auxiliary function computing the brightness of the Cloud or Bubble at given radii and in given line
according to the Cloudy models
"""
p = interp1d(rad_model, flux_model, fill_value='extrapolate', assume_sorted=True)
return p(rad_3d) * brt_3d
def interpolate_sphere_to_cartesian(spherical_array, x_grid=None, y_grid=None, z_grid=None,
rad_grid=None, theta_grid=None, phi_grid=None, pxscale=1. * u.pc):
"""
Auxiliary function to project the brightness or velocities from the spherical to cartesian coordinates
"""
x, y, z = np.meshgrid(x_grid, y_grid, z_grid, indexing='ij')
phi_c, theta_c, rad_c = xyz_to_sphere(x, y, z, pxscale=pxscale)
ir = interp1d(rad_grid, np.arange(len(rad_grid)), bounds_error=False)
ith = interp1d(theta_grid, np.arange(len(theta_grid)))
iphi = interp1d(phi_grid, np.arange(len(phi_grid)))
new_ir = ir(rad_c.ravel())
new_ith = ith(theta_c.ravel())
new_iphi = iphi(phi_c.ravel())
cart_data = map_coordinates(spherical_array, np.vstack([new_ir, new_ith, new_iphi]),
order=1, mode='constant', cval=0)
return cart_data.reshape([len(x_grid), len(y_grid), len(z_grid)]).T
def limit_angle(value, bottom_limit=0, top_limit=np.pi):
"""
Auxiliary function to limit the angle values to the range of [0, pi]
"""
value[value < bottom_limit] += (top_limit - bottom_limit)
value[value > top_limit] -= (top_limit - bottom_limit)
return value
def xyz_to_sphere(x, y, z, pxscale=1. * u.pc):
"""
Auxiliary function to map the coordinates from cartesian to spherical system
"""
phi_c = np.arctan2(y, x)
rad_c = (np.sqrt(x ** 2 + y ** 2 + z ** 2))
rad_c[rad_c == 0 * u.pc] = 1e-3 * pxscale
theta_c = (np.arccos(z / rad_c))
phi_c = limit_angle(phi_c, 0 * u.radian, 2 * np.pi * u.radian)
theta_c = limit_angle(theta_c, 0 * u.radian, np.pi * u.radian)
return phi_c, theta_c, rad_c
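# Illustrative usage sketch (added for clarity; not part of the original module).
# It assumes the astropy unit conventions used above: cartesian inputs carry a
# length unit and the returned angles are Quantities in radians.
def _example_xyz_to_sphere():
    """Convert a single point on the +z axis to spherical coordinates."""
    x = np.array([0.]) * u.pc
    y = np.array([0.]) * u.pc
    z = np.array([1.]) * u.pc
    phi, theta, rad = xyz_to_sphere(x, y, z)
    return phi, theta, rad  # theta ~ 0 rad (the pole), rad ~ 1 pc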
def find_model_id(file=lvmdatasimulator.CLOUDY_MODELS,
check_id=None, params=lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']):
"""
Checks the input parameters of the pre-computed Cloudy model and return corresponding index in the grid
"""
with fits.open(file) as hdu:
if check_id is None:
if params is None:
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
log.warning(f'Default Cloudy model will be used (id = {check_id})')
else:
summary_table = Table(hdu['Summary'].data)
indexes = np.arange(len(summary_table)).astype(int)
rec_table = np.ones(shape=len(summary_table), dtype=bool)
def closest(rec, prop, val):
unique_col = np.unique(summary_table[prop][rec])
if isinstance(val, str):
res = unique_col[unique_col == val]
if len(res) == 0:
return ""
return res
else:
return unique_col[np.argsort(np.abs(unique_col - val))[0]]
for p in params:
if p not in summary_table.colnames or params[p] is None or \
((isinstance(params[p], float) or isinstance(params[p], int)) and ~np.isfinite(params[p])):
continue
rec_table = rec_table & (summary_table[p] == closest(indexes, p, params[p]))
indexes = np.flatnonzero(rec_table)
if len(indexes) == 0:
break
if len(indexes) == 0 or len(indexes) == len(summary_table):
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
log.warning('Input parameters do not correspond to any pre-computed Cloudy model.'
'Default Cloudy model will be used (id = {0})'.format(check_id))
elif len(indexes) == 1:
check_id = summary_table['Model_ID'][indexes[0]]
for p in params:
if p not in summary_table.colnames or params[p] is None or \
((isinstance(params[p], float) or
isinstance(params[p], int)) and ~np.isfinite(params[p])):
continue
if params[p] != summary_table[p][indexes[0]]:
log.warning(f'Use the closest pre-computed Cloudy model with id = {check_id}')
break
else:
check_id = summary_table['Model_ID'][indexes[0]]
log.warning(f'Select one of the closest pre-computed Cloudy model with id = {check_id}')
#
# for cur_ext in range(len(hdu)):
# if cur_ext == 0:
# continue
# found = False
# for p in params:
# if p == 'id':
# continue
# precision = 1
# if p == 'Z':
# precision = 2
# if np.round(params[p], precision) != np.round(hdu[cur_ext].header[p], precision):
# break
# else:
# found = True
# if found:
# return cur_ext, check_id
# check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
# log.warning('Input parameters do not correspond to any pre-computed Cloudy model.'
# 'Default Cloudy model will be used (id = {0})'.format(check_id))
extension_index = None
while extension_index is None:
extension_index = [cur_ext for cur_ext in range(len(hdu)) if (
check_id == hdu[cur_ext].header.get('MODEL_ID'))]
if len(extension_index) == 0:
if check_id == lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']:
log.warning('Model_ID = {0} is not found in the Cloudy models grid. '
'Use the first one in the grid instead'.format(check_id))
extension_index = 1
else:
log.warning('Model_ID = {0} is not found in the Cloudy models grid. '
'Use default ({1}) instead'.format(check_id,
lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']))
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
extension_index = None
else:
extension_index = extension_index[0]
return extension_index, check_id
@dataclass
class Nebula:
"""
Base class defining properties of every nebula type.
By itself it describes the rectangular nebula (e.g. DIG)
Constructed nebula has 4 dimensions, where 4th derive its appearance in different lines
(if spectrum_id is None, or if it is dark nebula => only one line)
"""
xc: int = None # Center of the region in the field of view, pix
yc: int = None # Center of the region in the field of view, pix
x0: int = 0 # Coordinates of the bottom-left corner in the field of view, pix
y0: int = 0 # Coordinates of the bottom-left corner in the field of view, pix
pix_width: int = None # full width of cartesian grid, pix (should be odd)
pix_height: int = None # full height of cartesian grid, pix (should be odd)
width: u.pc = 0 * u.pc # width of the nebula in pc (not used if pix_width is set up)
height: u.pc = 0 * u.pc # height of the nebula in pc (not used if pix_height is set up)
pxscale: u.pc = 0.01 * u.pc # pixel size in pc
spectrum_id: int = None # ID of a template Cloudy emission spectrum for this nebula
n_brightest_lines: int = None # limit the number of the lines to the first N brightest
sys_velocity: velunit = 0 * velunit # Systemic velocity
turbulent_sigma: velunit = 10 * velunit # Velocity dispersion due to turbulence; included in calculations of LSF
max_brightness: fluxunit = 1e-15 * fluxunit
max_extinction: u.mag = 0 * u.mag
perturb_scale: int = 0 * u.pc # Spatial scale of correlated perturbations
perturb_amplitude: float = 0.1 # Maximal amplitude of perturbations
_npix_los: int = 1 # full size along line of sight in pixels
nchunks: int = -1 # number of chuncks to use for the convolution. If negative, select automatically
vel_gradient: (velunit / u.pc) = 0 # velocity gradient along the nebula
vel_pa: u.degree = 0 # Position angle of the kinematical axis (for the velocity gradient or rotation velocity)
def __post_init__(self):
self._assign_all_units()
self._assign_position_params()
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
def _assign_all_units(self):
whole_list_properties = ['pxscale', 'sys_velocity', 'turbulent_sigma', 'max_brightness', 'max_extinction',
'perturb_scale', 'radius', 'PA', 'length', 'width', 'vel_gradient', 'r_eff',
'vel_rot', 'expansion_velocity', 'spectral_axis', 'vel_pa']
whole_list_units = [u.pc, velunit, velunit, fluxunit, u.mag, u.pc, u.pc, u.degree, u.pc, u.pc,
(velunit / u.pc), u.kpc, velunit, velunit, velunit, u.degree]
cur_list_properties = []
cur_list_units = []
for prp, unit in zip(whole_list_properties, whole_list_units):
if hasattr(self, prp):
cur_list_properties.append(prp)
cur_list_units.append(unit)
assign_units(self, cur_list_properties, cur_list_units)
def _assign_position_params(self, conversion_type='rect'):
if conversion_type == 'rect':
for v in ['height', 'width']:
if self.__getattribute__(f'pix_{v}') is None:
val = np.round((self.__getattribute__(v) / self.pxscale).value / 2.).astype(int) * 2 + 1
else:
val = np.round(self.__getattribute__(f'pix_{v}') / 2.).astype(int) * 2 + 1
setattr(self, f'pix_{v}', val)
elif conversion_type == 'ellipse':
self.pix_width = (np.round(np.abs(self.radius / self.pxscale * np.sin(self.PA)) +
np.abs(self.radius / self.pxscale *
self.ax_ratio * np.cos(self.PA))).astype(int) * 2 + 1).value
self.pix_height = (np.round(np.abs(self.radius / self.pxscale * np.cos(self.PA)) +
np.abs(self.radius / self.pxscale *
self.ax_ratio * np.sin(self.PA))).astype(int) * 2 + 1).value
elif conversion_type == 'galaxy':
self.pix_width = (np.round(np.abs(self.r_max * np.sin(self.PA)) +
np.abs(self.r_max * self.ax_ratio * np.cos(self.PA))).astype(int) * 2 + 1).value
self.pix_height = (np.round(np.abs(self.r_max * np.cos(self.PA)) +
np.abs(self.r_max * self.ax_ratio * np.sin(self.PA))).astype(int) * 2 + 1).value
elif conversion_type == 'cylinder':
self.pix_width = (np.ceil((self.length * np.abs(np.sin(self.PA)) +
self.width * np.abs(np.cos(self.PA))) / self.pxscale / 2.
).astype(int) * 2 + 1).value
            self.pix_height = (np.ceil((self.length * np.abs(np.cos(self.PA)) +
                                        self.width * np.abs(np.sin(self.PA))) / self.pxscale / 2.
                                       ).astype(int) * 2 + 1).value
"""
Solving MsPacman-ram-v0 environment
Author: <NAME>(<EMAIL>)
"""
import gym
import numpy as np
import tensorflow as tf
#learning parameters
y = .95 #gamma
e = 0.1 #random selection epsilon
memory_size = 1000
num_episodes = 2000
env_features = 128 #128 bytes of atari console's RAM
train_batch_size = 64
random_threshold = 1000
def new_weights(shape, name="weight"):
'''Creates and initializes weight matrices'''
return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)
def new_biases(length, name="bias"):
'''Creates bias vectors for layers'''
return tf.Variable(tf.constant(0.1, shape=[length]), name=name)
def new_layer(input, num_inputs, num_outputs, use_relu=True, name="new_layer"):
'''Creates a new dense layer with dimensions as per the parameters'''
weights = new_weights(shape=[num_inputs, num_outputs], name="{}_weights".format(name))
biases = new_biases(length=num_outputs, name="{}_biases".format(name))
layer = tf.matmul(input, weights) + biases
if use_relu:
layer = tf.nn.relu(layer)
return layer
env = gym.make('MsPacman-ram-v0')
#build a table to store previous game states
table = np.zeros((memory_size, env_features * 2 + 2))
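# Hypothetical helper (added for illustration; not part of the original script).
# Each row of `table` is assumed to hold one transition laid out as
# [state (128 bytes) | action | reward | next_state (128 bytes)],
# which is why the row width is env_features * 2 + 2.
def store_transition(table, row_index, state, action, reward, next_state):
    '''Write one (s, a, r, s') transition into a row of the replay table.'''
    table[row_index, :env_features] = state
    table[row_index, env_features] = action
    table[row_index, env_features + 1] = reward
    table[row_index, env_features + 2:] = next_state
    return table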
# --- built in ---
import os
import math
# --- 3rd party ---
import numpy as np
import cv2
import moderngl
# --- my module ---
import dungeon_maps as dmap
from dungeon_maps.sim import RESOURCE_ROOT
def read_resource(filename):
"""Read text files"""
return open(os.path.join(RESOURCE_ROOT, filename), 'r').read()
def subtract_pose(p1, p2):
"""Caulate delta pose from p1 -> p2"""
x1, y1, o1 = p1[...,0], p1[...,1], p1[...,2]
x2, y2, o2 = p2[...,0], p2[...,1], p2[...,2]
r = ((x1-x2)**2.0 + (y1-y2)**2.0)**0.5 # distance
  p = np.arctan2(y2-y1, x2-x1) - o1  # bearing from p1 to p2, measured in p1's frame
do = o2 - o1
do = np.arctan2(np.sin(do), np.cos(do)) # [-pi/2, pi/2]
dx = r * np.cos(p)
dy = r * np.sin(p)
return np.stack([dx, dy, do], axis=-1) # (batch, 3)
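# Illustrative sketch (added; not part of the original module): subtract_pose
# expresses p2 relative to p1's frame, so a pose one unit ahead of an
# unrotated origin and turned 90 degrees comes back as roughly (1, 0, pi/2).
def _example_subtract_pose():
  p1 = np.array([0., 0., 0.])          # x, y, heading of the reference pose
  p2 = np.array([1., 0., np.pi / 2.])  # one unit along +x, turned 90 degrees
  return subtract_pose(p1, p2)         # -> approximately [1., 0., pi/2]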
class Playground():
# Action definitions
NONE = 0
FORWARD = 1
LEFT = 2
RIGHT = 3
BACKWARD = 4
STOP = 5
def __init__(
self,
width: int = 800,
height: int = 600,
hfov: float = 1.2217304,
cam_pitch: float = -0.3490659,
cam_height: float = 0.88,
# --- simulator macros ---
min_depth: float = 0.1,
max_depth: float = 10.0,
ray_iter: int = 250,
ray_mult: float = 0.95,
shadow_iter: int = 48,
shadow_max_step: float = 0.05,
maze_scale: float = 3.0,
wall_height: float = 1.0,
wall_width: float = 0.25,
ctx = None
):
"""Create playground simulator
Args:
width (int, optional): image width. Defaults to 800.
height (int, optional): image height. Defaults to 600.
hfov (float, optional): horizontal field of view (rad). Defaults to
1.2217304. ~70 deg
cam_pitch (float, optional): pitch angle of camera (rad). Defaults to
-0.3490659. ~-20 deg
cam_height (float, optional): vertical distance from ground to camera.
(meter) Defaults to 0.88.
min_depth (float, optional): minimum depth of depth sensor (meter).
Defaults to 0.1.
max_depth (float, optional): maximum depth of depth sensor (meter).
Defaults to 10.0.
ray_iter (int, optional): maximum number of ray marching steps. Defaults
to 250.
ray_mult (float, optional): size multiplier of each ray marching step.
Defaults to 0.95.
shadow_iter (int, optional): maximum number of steps to casting shadow.
Defaults to 48.
shadow_max_step (float, optional): maximum distance of steps when casting
shadow. Defaults to 0.05.
      maze_scale (float, optional): size multiplier of the maze. Defaults to 3.0.
wall_height (float, optional): height of maze's wall (meter). Defaults to 1.0.
wall_width (float, optional): width of maze's wall (meter). Defaults to 0.25.
ctx (optional): moderngl context object. Defaults to None.
"""
self.width = width
self.height = height
self.window_size = (width, height)
self.min_depth = min_depth
self.max_depth = max_depth
self.ray_iter = ray_iter
self.ray_mult = ray_mult
self.shadow_iter = shadow_iter
self.shadow_max_step = shadow_max_step
self.maze_scale = maze_scale
self.wall_height = wall_height
self.wall_width = wall_width
# Cretae context if it's not given
if ctx is None:
ctx = moderngl.create_context(standalone=True, backend='egl')
self.ctx = ctx
# Create framebuffer for offline rendering
self.fbo = ctx.simple_framebuffer((width, height), components=4)
self.fbo.use()
self.fbo.clear()
self.ctx.enable(moderngl.DEPTH_TEST)
# Create shader program
self.program = self.ctx.program(
vertex_shader = read_resource('playground.vs'),
fragment_shader = read_resource('playground.fs')
)
self.vao = self.create_screen_vao(self.program)
# Uniforms
program = self.program
self.iTime = program.get("iTime", None)
self.iResolution = program.get("iResolution", None)
self.iPosition = program.get("iPosition", None)
self.iTarget = program.get("iTarget", None)
self.iHFOV = program.get("iHFOV", None)
self.iSeg = program.get("iSeg", None)
self.set_macros()
# Player initial states
self.hfov = hfov
self.init_pos = np.array((0., cam_height, 0.), dtype=np.float64)
# pitch, yaw, roll (not used)
self.init_rot = np.array((cam_pitch, math.radians(-135.), 0.), dtype=np.float64)
self.step_size = 0.5
# Player current states
self.cur_pos = self.init_pos.copy()
self.cur_rot = self.init_rot.copy()
self.cur_dir = None
self.cur_steps = 0
self.delta_time = 0.25
self.update_player_states()
def set_macros(self):
def set_if_not_none(key, value):
uniform = self.program.get(key, None)
if uniform is not None:
uniform.value = value
set_if_not_none('MIN_DEPTH', self.min_depth)
set_if_not_none('MAX_DEPTH', self.max_depth)
set_if_not_none('RAY_ITER', self.ray_iter)
set_if_not_none('RAY_MULT', self.ray_mult)
set_if_not_none('SHADOW_ITER', self.shadow_iter)
set_if_not_none('SHADOW_MAX_STEP', self.shadow_max_step)
set_if_not_none('MAZE_SCALE', self.maze_scale)
set_if_not_none('WALL_HEIGHT', self.wall_height)
set_if_not_none('WALL_WIDTH', self.wall_width)
def create_screen_vao(self, program):
vertex_data = np.array([
# x, y, z, u, v
-1.0, -1.0, 0.0, 0.0, 0.0,
+1.0, -1.0, 0.0, 1.0, 0.0,
-1.0, +1.0, 0.0, 0.0, 1.0,
+1.0, +1.0, 0.0, 1.0, 1.0,
]).astype(np.float32)
content = [(
self.ctx.buffer(vertex_data),
'3f 2f',
'in_vert', 'in_uv'
)]
idx_data = np.array([
0, 1, 2,
1, 2, 3
]).astype(np.int32)
idx_buffer = self.ctx.buffer(idx_data)
return self.ctx.vertex_array(program, content, idx_buffer)
def update_player_states(self):
pitch, yaw, _ = self.cur_rot
self.cur_dir = np.array((
math.cos(pitch) * (-math.sin(yaw)),
math.sin(pitch),
math.cos(pitch) * math.cos(yaw)
), dtype=np.float64)
def render(self, mode: str='rgb_array'):
if self.iTime is not None:
self.iTime.value = self.delta_time * self.cur_steps
if self.iResolution is not None:
self.iResolution.value = self.window_size
if self.iPosition is not None:
self.iPosition.value = tuple(
(self.cur_pos * [1., 1., -1.]).tolist() # flip z-axis
)
if self.iTarget is not None:
self.iTarget.value = tuple(
((self.cur_pos + self.cur_dir)* [1., 1., -1.]).tolist() # flip z-axis
)
if self.iHFOV is not None:
self.iHFOV.value = self.hfov
if self.iSeg is not None:
self.iSeg.value = 0.
self.fbo.use()
self.fbo.clear()
self.vao.render()
# Get RGB image
raw_bytes = np.frombuffer(self.fbo.read(components=4), dtype=np.uint8)
image = raw_bytes.reshape(self.height, self.width, 4)
    image = image[::-1][..., 0:3]  # flip vertically, keep RGB channels
# Get Depth image
raw_bytes = np.frombuffer(
self.fbo.read(attachment=-1, dtype='f4'),
dtype=np.float32
)
depth = raw_bytes.reshape(self.height, self.width, 1)
depth = depth[::-1]
# Render segmentation
if self.iSeg is not None:
self.iSeg.value = 1.
self.fbo.clear()
self.vao.render()
# Get segmentation image
raw_bytes = np.frombuffer(self.fbo.read(components=4), dtype=np.uint8)
seg_image = raw_bytes.reshape(self.height, self.width, 4)
seg_image = seg_image[::-1][..., 0:1].astype(np.int64)
return {
'rgb': image,
'depth': depth,
'segmentation': seg_image
}
def calc_related_pose(self):
pose1 = np.array([
self.init_pos[0],
self.init_pos[2],
self.init_rot[1]],
dtype=np.float64
)
pose2 = np.array([
self.cur_pos[0],
self.cur_pos[2],
self.cur_rot[1]
], dtype=np.float64)
return subtract_pose(pose1, pose2)
def _get_observations(self):
observations = self.render()
pose = self.calc_related_pose()
observations['pose_gt'] = pose
return observations
def reset(self):
# Reset player states
self.cur_pos = self.init_pos.copy()
self.cur_rot = self.init_rot.copy()
self.cur_dir = None
self.cur_steps = 0
self.update_player_states()
return self._get_observations()
def step(self, action):
if action == self.NONE:
pass
elif action == self.FORWARD:
d = np.array((self.cur_dir[0], 0., self.cur_dir[2]), dtype=np.float64)
      d = d / np.linalg.norm(d)
#-- -- -- -- Intermediate Python
# Used for Data Scientist Training Path
#FYI it's a compilation of how to work
#with different commands.
####### -----> Matplotlib
### --------------------------------------------------------
## Line plot - ex#0
# Print the last item from year and pop
print(year[-1])
print(pop[-1])
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Make a line plot: year on the x-axis, pop on the y-axis
plt.plot(year, pop)
# Display the plot with plt.show()
plt.show()
### --------------------------------------------------------
## Line plot - ex#1
import matplotlib.pyplot as plt
# Print the last item of gdp_cap and life_exp
print(gdp_cap[-1])
print(life_exp[-1])
# Make a line plot, gdp_cap on the x-axis, life_exp on the y-axis
plt.plot(gdp_cap, life_exp)
# Display the plot
plt.show()
### --------------------------------------------------------
## Scatter Plot --- ex0
import matplotlib.pyplot as plt
# Change the line plot below to a scatter plot
plt.scatter(gdp_cap, life_exp)
# Put the x-axis on a logarithmic scale
plt.xscale('log')
# Show plot
plt.show()
### --------------------------------------------------------
## Scatter Plot --- ex1
# Import package
import matplotlib.pyplot as plt
# Build Scatter plot
plt.scatter(pop, life_exp)
# Show plot
plt.show()
## HISTOGRAMS
### --------------------------------------------------------
### -> Build a histogram
import matplotlib.pyplot as plt
# Create histogram of life_exp data
plt.hist(life_exp)
# Display histogram
plt.show()
### --------------------------------------------------------
## Build a histogram --- bins
import matplotlib.pyplot as plt
# Build histogram with 5 bins
plt.hist(life_exp, bins=5)
# Show and clean up plot
plt.show()
plt.clf()
# Build histogram with 20 bins
plt.hist(life_exp, bins=20)
# Show and clean up again
plt.show()
plt.clf()
### --------------------------------------------------------
## Build a histogram --- compare
import matplotlib.pyplot as plt
# Histogram of life_exp, 15 bins
plt.hist(life_exp, bins=15)
# Show and clear plot
plt.show()
plt.clf()
# Histogram of life_exp1950, 15 bins
plt.hist(life_exp1950, bins=15)
# Show and clear plot again
plt.show()
plt.clf()
### --------------------------------------------------------
# You're a professor teaching Data Science with Python,
# and you want to visually assess if the grades on
# your exam follow a particular distribution.
# Which plot do you use?
# R/ Histogram
### --------------------------------------------------------
# You're a professor in Data Analytics with Python, and you
# want to visually assess if longer answers on exam
# questions lead to higher grades.
# Which plot do you use?
# Scatter plot
### --------------------------------------------------------
### Labels
import matplotlib.pyplot as plt
# Basic scatter plot, log scale
plt.scatter(gdp_cap, life_exp)
plt.xscale('log')
# Strings
xlab = 'GDP per Capita [in USD]'
ylab = 'Life Expectancy [in years]'
title = 'World Development in 2007'
# Add axis labels
plt.xlabel(xlab)
plt.ylabel(ylab)
# Add title
plt.title(title)
# After customizing, display the plot
plt.show()
### --------------------------------------------------------
## Ticks
import matplotlib.pyplot as plt
# Scatter plot
plt.scatter(gdp_cap, life_exp)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
# Definition of tick_val and tick_lab
tick_val = [1000, 10000, 100000]
tick_lab = ['1k', '10k', '100k']
# Adapt the ticks on the x-axis
plt.xticks(tick_val, tick_lab)
# After customizing, display the plot
plt.show()
### --------------------------------------------------------
#Sizes
# Import numpy as np
import numpy as np
# Store pop as a numpy array: np_pop
np_pop = np.array(pop)
# Double np_pop
np_pop = np_pop*2
# Update: set s argument to np_pop
plt.scatter(gdp_cap, life_exp, s = np_pop)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000, 10000, 100000],['1k', '10k', '100k'])
# Display the plot
plt.show()
### --------------------------------------------------------
### Colors
import matplotlib.pyplot as plt
# Specify c and alpha inside plt.scatter()
plt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Show the plot
plt.show()
### --------------------------------------------------------
## Additional Customizations
# Scatter plot
plt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Additional customizations
plt.text(1550, 71, 'India')
plt.text(5700, 80, 'China')
# Add grid() call
plt.grid(True)
# Show the plot
plt.show()
### --------------------------------------------------------
#### INTERPRETATION
# If you have a look at your colorful plot,
# it's clear that people live longer in countries with a
# higher GDP per capita. No high income countries have really
# short life expectancy, and no low income countries
# have very long life expectancy. Still, there is a huge
# difference in life expectancy between countries on the same
# income level. Most people live in middle income countries
# where difference in lifespan is huge between countries;
# depending on how income is distributed and how it is used.
# What can you say about the plot?
## R/ The countries in blue, corresponding to Africa, have
# both low life expectancy and a low GDP per capita.
### Dictionaries, Part 1
### --------------------------------------------------------
### --->Motivation for dictionaries
# Definition of countries and capital
countries = ['spain', 'france', 'germany', 'norway']
capitals = ['madrid', 'paris', 'berlin', 'oslo']
# Get index of 'germany': ind_ger
ind_ger = countries.index('germany')
# Use ind_ger to print out capital of Germany
print(capitals[ind_ger])
### --------------------------------------------------------
## Create dictionary
# Definition of countries and capital
countries = ['spain', 'france', 'germany', 'norway']
capitals = ['madrid', 'paris', 'berlin', 'oslo']
# From string in countries and capitals, create dictionary europe
europe = {
'spain':'madrid',
"france":"paris",
"germany":"berlin",
"norway":"oslo"}
# Print europe
print(europe)
### --------------------------------------------------------
## Access dictionary
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin', 'norway':'oslo' }
# Print out the keys in europe
print(europe.keys())
# Print out value that belongs to key 'norway'
print(europe['norway'])
### Dictionaries, Part 2
### --------------------------------------------------------
### ---> Dictionary Manipulation - ex 0
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin', 'norway':'oslo' }
# Add italy to europe
europe["italy"] = 'rome'
# Print out italy in europe
print('italy' in europe)
# Add poland to europe
europe["poland"] = 'warsaw'
# Print europe
print(europe)
### --------------------------------------------------------
### ---> Dictionary Manipulation - ex 1
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'bonn',
'norway':'oslo', 'italy':'rome', 'poland':'warsaw',
'australia':'vienna' }
# Update capital of germany
europe['germany'] = 'berlin'
# Remove australia
del(europe['australia'])
# Print europe
print(europe)
### --------------------------------------------------------
## Dictionariception
# Dictionary of dictionaries
europe = { 'spain': { 'capital':'madrid', 'population':46.77 },
'france': { 'capital':'paris', 'population':66.03 },
'germany': { 'capital':'berlin', 'population':80.62 },
'norway': { 'capital':'oslo', 'population':5.084 } }
# Print out the capital of France
print(europe['france']['capital'])
# Create sub-dictionary data
data = {'capital': 'rome', 'population': 59.83}
# Add data to europe under key 'italy'
europe['italy'] = data
# Print europe
print(europe)
### Pandas, Part 1
### --------------------------------------------------------
#### ---->>> Dictionary to DataFrame -- ex#0
# Pre-defined lists
names = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt']
dr = [True, False, False, False, True, True, True]
cpc = [809, 731, 588, 18, 200, 70, 45]
# Import pandas as pd
import pandas as pd
# Create dictionary my_dict with three key:value pairs: my_dict
my_dict = {'country': names, 'drives_right': dr, 'cars_per_cap': cpc}
# Build a DataFrame cars from my_dict: cars
cars = pd.DataFrame(my_dict)
# Print cars
print(cars)
### --------------------------------------------------------
#### ---->>> Dictionary to DataFrame -- ex#0
import pandas as pd
# Build cars DataFrame
names = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt']
dr = [True, False, False, False, True, True, True]
cpc = [809, 731, 588, 18, 200, 70, 45]
cars_dict = { 'country':names, 'drives_right':dr, 'cars_per_cap':cpc }
cars = pd.DataFrame(cars_dict)
print(cars)
# Definition of row_labels
row_labels = ['US', 'AUS', 'JPN', 'IN', 'RU', 'MOR', 'EG']
# Specify row labels of cars
cars.index = row_labels
# Print cars again
print(cars)
### --------------------------------------------------------
### CSV to DataFrame --- ex#0
# Import pandas as pd
import pandas as pd
# Import the cars.csv data: cars
cars = pd.read_csv('cars.csv')
# Print out cars
print(cars)
### --------------------------------------------------------
### CSV to DataFrame --- ex#1
# Import pandas as pd
import pandas as pd
# Fix import by including index_col
cars = pd.read_csv('cars.csv', index_col=0)
# Print out cars
print(cars)
### Pandas, Part 2
### --------------------------------------------------------
### ----> Square Brackets -- ex#0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out country column as Pandas Series
print(cars['country'])
# Print out country column as Pandas DataFrame
print(cars[['country']])
# Print out DataFrame with country and drives_right columns
print(cars[['country', 'drives_right']])
### --------------------------------------------------------
### ----> Square Brackets -- ex#1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out first 3 observations
print(cars[0:3])
# Print out fourth, fifth and sixth observation
print(cars[3:6])
### --------------------------------------------------------
### ---> loc and iloc -- ex#0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out observation for Japan
print(cars.loc['JPN'])
# Print out observations for Australia and Egypt
print(cars.loc[['AUS', 'EG']])
### --------------------------------------------------------
### ---> loc and iloc -- ex#1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out drives_right value of Morocco
print(cars.loc['MOR', 'drives_right'])
# Print sub-DataFrame
print(cars.loc[['RU', 'MOR'], ['country', 'drives_right']])
### --------------------------------------------------------
### ---> loc and iloc -- ex#2
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Print out drives_right column as Series
print(cars.loc[:, 'drives_right'])
# Print out drives_right column as DataFrame
print(cars.loc[:, ['drives_right']])
# Print out cars_per_cap and drives_right as DataFrame
print(cars.loc[:, ['cars_per_cap', 'drives_right']])
## Comparison Operators
### --------------------------------------------------------
### ---> Equality
# Comparison of booleans
True != False
# Comparison of integers
-5 * 15 != 75
# Comparison of strings
"pyscript" != "PyScript"
# Compare a boolean with an integer
True == 1
### --------------------------------------------------------
### ---> Greater and less than
# Comparison of integers
x = -3 * 6
print(x >= -10)
# Comparison of strings
y = "test"
print('test' <= y)
# Comparison of booleans
print(True > False)
### --------------------------------------------------------
### ---> Compare arrays
# Create arrays
import numpy as np
my_house = np.array([18.0, 20.0, 10.75, 9.50])
your_house = np.array([14.0, 24.0, 14.25, 9.0])
# my_house greater than or equal to 18
print(my_house >= 18)
# my_house less than your_house
print(my_house < your_house)
#---- Boolean Operators
### --------------------------------------------------------
## and, or, not -- ex#0
# Define variables
my_kitchen = 18.0
your_kitchen = 14.0
# my_kitchen bigger than 10 and smaller than 18?
print(my_kitchen > 10 and my_kitchen < 18)
# my_kitchen smaller than 14 or bigger than 17?
print(my_kitchen < 14 or my_kitchen > 17)
# Double my_kitchen smaller than triple your_kitchen?
print((my_kitchen * 2) < (your_kitchen * 3))
### --------------------------------------------------------
## and, or, not -- ex#1
# To see if you completely understood the boolean
# operators, have a look at the following piece of Python code:
# x = 8
# y = 9
# not(not(x < 3) and not(y > 14 or y > 10))
# What will the result be if you execute these three
# commands in the IPython Shell?
# NB: Notice that not has a higher priority
# than and and or, it is executed first.
# R/ False
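# Worked check of the expression above (added for illustration):
x = 8
y = 9
# x < 3 is False, so not(x < 3) is True; y > 14 or y > 10 is False, so
# not(y > 14 or y > 10) is True; True and True is True, and the outer
# not(...) therefore evaluates to False.
print(not(not(x < 3) and not(y > 14 or y > 10)))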
### --------------------------------------------------------
## Boolean operators with Numpy
# Create arrays
import numpy as np
my_house = np.array([18.0, 20.0, 10.75, 9.50])
your_house = np.array([14.0, 24.0, 14.25, 9.0])
# my_house greater than 18.5 or smaller than 10
print(np.logical_or(my_house > 18.5,
my_house < 10))
# Both my_house and your_house smaller than 11
print(np.logical_and(my_house < 11,
your_house < 11))
### --------------------------------------------------------
## ----> if, elif, else
### Warmup
# To experiment with if and else a bit, have a look at this code sample:
# area = 10.0
# if(area < 9) :
# print("small")
# elif(area < 12) :
# print("medium")
# else :
# print("large")
# What will the output be if you run this piece of code in the IPython Shell?
# R/ medium
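# Worked check (added for illustration): with area = 10.0 the first branch
# (area < 9) is False and the elif (area < 12) is True, so "medium" is printed.
area = 10.0
if area < 9:
    print("small")
elif area < 12:
    print("medium")
else:
    print("large")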
### --------------------------------------------------------
## if example
# Define variables
room = "kit"
area = 14.0
# if statement for room
if room == "kit" :
print("looking around in the kitchen.")
# if statement for area
if area > 15:
print("big place!")
### --------------------------------------------------------
## Add else
# Define variables
room = "kit"
area = 14.0
# if-else construct for room
if room == "kit" :
print("looking around in the kitchen.")
else :
print("looking around elsewhere.")
# if-else construct for area
if area > 15 :
print("big place!")
else:
print("pretty small.")
### --------------------------------------------------------
## Customize further: elif
# Define variables
room = "bed"
area = 14.0
# if-elif-else construct for room
if room == "kit" :
print("looking around in the kitchen.")
elif room == "bed":
print("looking around in the bedroom.")
else :
print("looking around elsewhere.")
# if-elif-else construct for area
if area > 15 :
print("big place!")
elif area > 10:
print("medium size, nice!")
else :
print("pretty small.")
## Filtering pandas DataFrames
### --------------------------------------------------------
### ---> Driving right - ex#0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Extract drives_right column as Series: dr
dr = cars['drives_right']
# Use dr to subset cars: sel
sel = cars[dr]
# Print sel
print(sel)
### --------------------------------------------------------
### ---> Driving right - ex#1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Convert code to a one-liner
sel = cars[cars['drives_right']]
# Print sel
print(sel)
### --------------------------------------------------------
### ---> Cars per capita # ex0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Create car_maniac: observations that have a cars_per_cap over 500
cpc = cars['cars_per_cap']
many_cars = cpc > 500
car_maniac = cars[many_cars]
# Print car_maniac
print(car_maniac)
### --------------------------------------------------------
### ---> Cars per capita # ex1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Import numpy, you'll need this
import numpy as np
# Create medium: observations with cars_per_cap between 100 and 500
cpc = cars['cars_per_cap']
between = np.logical_and(cpc > 100, cpc < 500)
medium = cars[between]
# Print medium
print(medium)
## while loop
### --------------------------------------------------------
## ----> while: warming up
# The while loop is like a repeated if statement.
# The code is executed over and over again, as long as
# the condition is True. Have another look at its recipe.
# while condition :
# expression
# Can you tell how many printouts the following while loop will do?
# x = 1
# while x < 4 :
# print(x)
# x = x + 1
## R/ 3
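# Worked check (added for illustration): the body runs for x = 1, 2 and 3,
# so there are exactly 3 printouts before the condition x < 4 becomes False.
x = 1
while x < 4:
    print(x)
    x = x + 1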
### --------------------------------------------------------
## Basic while loop
# Initialize offset
offset = 8
# Code the while loop
while offset != 0:
print("correcting...")
offset = offset - 1
print(offset)
### --------------------------------------------------------
## Add conditionals
# Initialize offset
offset = -6
# Code the while loop
while offset != 0 :
print("correcting...")
if offset > 0:
offset = offset - 1
else:
offset = offset + 1
print(offset)
## for loop
### --------------------------------------------------------
# -- Loop over a list
# areas list
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Code the for loop
for a in areas:
print(a)
### --------------------------------------------------------
# Indexes and values - ex#0
# areas list
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Change for loop to use enumerate()
for index, a in enumerate(areas):
print("room " + str(index) + ": " + str(a))
### --------------------------------------------------------
# Indexes and values - ex#1
# areas list
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Code the for loop
for index, area in enumerate(areas) :
print("room " + str(index+1) + ": " + str(area))
### --------------------------------------------------------
## Loop over list of lists
# house list of lists
house = [["hallway", 11.25],
["kitchen", 18.0],
["living room", 20.0],
["bedroom", 10.75],
["bathroom", 9.50]]
# Build a for loop from scratch
for room, area in house :
print("the " + str(room) + " is " + str(area) + " sqm")
### Loop Data Structures Part 1
### --------------------------------------------------------
## ---> Loop over dictionary
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin',
'norway':'oslo', 'italy':'rome', 'poland':'warsaw', 'austria':'vienna' }
# Iterate over europe
for k, v in europe.items():
print("the capital of " + str(k) + " is " + str(v))
### --------------------------------------------------------
## ---> Loop over Numpy array
# Import numpy as np
import numpy as np
# For loop over np_height
for x in np_height:
print(str(x) + " inches")
# For loop over np_baseball
for x in np.nditer(np_baseball):
print(x)
#### Loop Data Structures Part 2
### --------------------------------------------------------
###------> Loop over DataFrame -- ex#0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Iterate over rows of cars
for lab, row in cars.iterrows():
print(lab)
print(row)
### --------------------------------------------------------
###------> Loop over DataFrame -- ex#1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Adapt for loop
for lab, row in cars.iterrows() :
print(str(lab) + ": " + str(row["cars_per_cap"]))
### --------------------------------------------------------
## Add column - ex#0
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Code for loop that adds COUNTRY column
for lab, row in cars.iterrows():
cars.loc[lab, 'COUNTRY'] = row['country'].upper()
# Print cars
print(cars)
### --------------------------------------------------------
## Add column - ex#1
# Import cars data
import pandas as pd
cars = pd.read_csv('cars.csv', index_col = 0)
# Use .apply(str.upper)
cars['COUNTRY'] = cars['country'].apply(str.upper)
### Random Numbers
### --------------------------------------------------------
### ---> Random float
# Import numpy as np
import numpy as np
# Set the seed
np.random.seed(123)
# Generate and print random float
print(np.random.rand())
### --------------------------------------------------------
### ----> Roll the dice
# Import numpy and set seed
import numpy as np
np.random.seed(123)
# Use randint() to simulate a dice
print(np.random.randint(1, 7))
# Use randint() again
print(np.random.randint(1, 7))
### --------------------------------------------------------
### ---->Determine your next move
# Import numpy and set seed
import numpy as np
np.random.seed(123)
# Starting step
step = 50
# Roll the dice
dice = np.random.randint(1, 7)
# Finish the control construct
if dice <= 2:
step = step - 1
elif dice < 6:
step = step + 1
else:
step = step + np.random.randint(1,7)
# Print out dice and step
print(dice)
print(step)
### Random Walk
### --------------------------------------------------------
## ---> The next step
# Import numpy and set seed
import numpy as np
np.random.seed(123)
# Initialize random_walk
random_walk = [0]
# Complete the loop
for x in range(100):
# Set step: last element in random_walk
step = random_walk[-1]
# Roll the dice
dice = np.random.randint(1,7)
# Determine next step
if dice <= 2:
step = step - 1
elif dice <= 5:
step = step + 1
else:
step = step + np.random.randint(1,7)
# append next_step to random_walk
random_walk.append(step)
# Print random_walk
print(random_walk)
### --------------------------------------------------------
### ---> How low can you go?
# Import numpy and set seed
import numpy as np
np.random.seed(123)
# Initialize random_walk
random_walk = [0]
for x in range(100) :
step = random_walk[-1]
dice = np.random.randint(1,7)
if dice <= 2:
# Replace below: use max to make sure step can't go below 0
step = max(0, step - 1)
elif dice <= 5:
step = step + 1
else:
step = step + np.random.randint(1,7)
random_walk.append(step)
print(random_walk)
### --------------------------------------------------------
### ---> Visualize the walk
# Initialization
import numpy as np
np.random.seed(123)
random_walk = [0]
for x in range(100) :
step = random_walk[-1]
dice = np.random.randint(1,7)
if dice <= 2:
step = max(0, step - 1)
elif dice <= 5:
step = step + 1
else:
step = step + np.random.randint(1,7)
random_walk.append(step)
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Plot random_walk
plt.plot(random_walk)
# Show the plot
plt.show()
### Distribution
### --------------------------------------------------------
### ---> Simulate multiple walks
# Initialization
import numpy as np
| np.random.seed(123) | numpy.random.seed |
import pandas as pd
import os
import torch
import argparse
import numpy as np
import json
import matplotlib.pyplot as plt
import torch.nn.functional as F
from utils import smooth
from utils import detect_with_thresholding
from utils import get_dataset, normalize, interpolate
from utils import mask_to_detections, load_config_file
from utils import output_detections_ucf_crime
from utils import ucf_crime_old_cls_names, ucf_crime_old_cls_indices
from utils import prepare_gt, segment_iou
from collections import OrderedDict
import pdb
def softmax(x, dim):
x = F.softmax(torch.from_numpy(x), dim=dim)
return x.numpy()
def prepare_detections(detpth):
f = open(detpth)
det_list = []
for i in f.readlines():
i = i.replace('\n', '')
i = i.split(' ')
det_list.append(i)
df = pd.DataFrame(det_list)
df.columns = ['videoname', 'start', 'end', 'cls', 'conf']
f.close()
return df
def interpolated_prec_rec(prec, rec):
"""
Interpolated AP - VOCdevkit from VOC 2011.
"""
mprec = np.hstack([[0], prec, [0]])
mrec = np.hstack([[0], rec, [1]])
for i in range(len(mprec) - 1)[::-1]:
mprec[i] = max(mprec[i], mprec[i + 1])
idx = np.where(mrec[1::] != mrec[0:-1])[0] + 1
ap = np.sum((mrec[idx] - mrec[idx - 1]) * mprec[idx])
return ap
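# Illustrative sketch (not part of the original script): a quick check of
# interpolated_prec_rec on hand-made precision/recall arrays. The numbers are
# arbitrary and only demonstrate the expected call signature and output.
def _demo_interpolated_prec_rec():
    prec = np.array([1.0, 0.5, 0.67, 0.5])
    rec = np.array([0.25, 0.25, 0.5, 0.75])
    # AP is the area under the monotonically-smoothed precision envelope.
    return interpolated_prec_rec(prec, rec)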
def compute_average_precision_detection(ground_truth, prediction, tiou_thresholds):
"""
Compute average precision (detection task) between ground truth and
predictions data frames. If multiple predictions occurs for the same
predicted segment, only the one with highest IoU score is matched as
true positive. This code is greatly inspired by Pascal VOC devkit.
Parameters
----------
ground_truth :
Data frame containing the ground truth instances.
Required fields: ['videoname', 'start', 'end']
prediction :
Data frame containing the prediction instances.
Required fields: ['videoname', 'start', 'end', 'conf']
tiou_thresholds : 1darray, optional
Temporal intersection over union threshold.
Outputs
-------
ap : float
Average precision score.
"""
prediction = prediction.reset_index(drop=True)
npos = float(len(ground_truth))
lock_gt = np.ones((len(tiou_thresholds), len(ground_truth))) * -1
# Sort predictions by decreasing (confidence) score order.
sort_idx = prediction['conf'].values.argsort()[::-1]
prediction = prediction.loc[sort_idx].reset_index(drop=True)
# Initialize true positive and false positive vectors.
tp = np.zeros((len(tiou_thresholds), len(prediction)))
fp = np.zeros((len(tiou_thresholds), len(prediction)))
# Adaptation to query faster
ground_truth_gbvn = ground_truth.groupby('videoname')
    # Assigning true positives to the true ground truth instances.
for idx, this_pred in prediction.iterrows():
try:
# Check if there is at least one ground truth in the video associated with predicted video.
ground_truth_videoid = ground_truth_gbvn.get_group(this_pred['videoname'])
except Exception as e:
fp[:, idx] = 1
continue
this_gt = ground_truth_videoid.reset_index()
tiou_arr = segment_iou(this_pred[['start', 'end']].values, this_gt[['start', 'end']].values)
# We would like to retrieve the predictions with highest tiou score.
tiou_sorted_idx = tiou_arr.argsort()[::-1]
for tidx, tiou_thr in enumerate(tiou_thresholds):
for jdx in tiou_sorted_idx:
if tiou_arr[jdx] < tiou_thr:
fp[tidx, idx] = 1
break
if lock_gt[tidx, this_gt.loc[jdx]['index']] >= 0:
continue
# Assign as true positive after the filters above.
tp[tidx, idx] = 1
lock_gt[tidx, this_gt.loc[jdx]['index']] = idx
break
if fp[tidx, idx] == 0 and tp[tidx, idx] == 0:
fp[tidx, idx] = 1
ap = np.zeros(len(tiou_thresholds))
rec, prec = None, None
for tidx in range(len(tiou_thresholds)):
# Computing prec-rec (per-class basis at every individual tiou_thresholds)
        this_tp = np.cumsum(tp[tidx,:]).astype(float)
        this_fp = np.cumsum(fp[tidx,:]).astype(float)
rec = this_tp / npos
prec = this_tp / (this_tp + this_fp)
ap[tidx] = interpolated_prec_rec(prec, rec)
return ap
def eval_ap(iou, clss, gt, prediction):
ap = compute_average_precision_detection(gt, prediction, iou)
return ap[0]
def detect(
dataset_dicts,
cas_dir,
subset,
out_file_name,
global_score_thrh,
metric_type,
thrh_type,
thrh_value,
interpolate_type,
proc_type,
proc_value,
sample_offset,
weight_inner,
weight_outter,
weight_global,
att_filtering_value=None,
):
assert (metric_type in ['score', 'multiply', 'att-filtering'])
assert (thrh_type in ['mean', 'max'])
assert (interpolate_type in ['quadratic', 'linear', 'nearest'])
assert (proc_type in ['dilation', 'median'])
out_detections = []
dataset_dict = dataset_dicts[subset]
for video_name in dataset_dict.keys():
cas_file = video_name + '.npz'
cas_data = np.load(os.path.join(cas_dir, cas_file))
avg_score = cas_data['avg_score']
att_weight = cas_data['weight']
branch_scores = cas_data['branch_scores']
global_score = cas_data['global_score']
fps = dataset_dict[video_name]['frame_rate']
frame_cnt = dataset_dict[video_name]['frame_cnt']
duration = frame_cnt/fps
global_score = softmax(global_score, dim=0)
################ Thresholding ################
for class_id in range(all_params['action_class_num']):
if global_score[class_id] <= global_score_thrh:
continue
if metric_type == 'score':
metric = softmax(avg_score, dim=1)[:, class_id:class_id + 1]
#metric = smooth(metric)
metric = normalize(metric)
elif metric_type == 'multiply':
_score = softmax(avg_score, dim=1)[:, class_id:class_id + 1]
metric = att_weight * _score
#metric = smooth(metric)
metric = normalize(metric)
elif metric_type == 'att-filtering':
assert (att_filtering_value is not None)
metric = softmax(avg_score, dim=1)[:, class_id:class_id + 1]
#metric = smooth(metric)
metric = normalize(metric)
metric[att_weight < att_filtering_value] = 0
metric = normalize(metric)
#########################################
metric = interpolate(metric[:, 0],
all_params['feature_type'],
frame_cnt,
all_params['base_sample_rate']*all_params['sample_rate'],
snippet_size=all_params['base_snippet_size'],
kind=interpolate_type)
metric = | np.expand_dims(metric, axis=1) | numpy.expand_dims |
'''
Stats.py - statistical utility functions
========================================
:Tags: Python
Code
----
'''
import math
import numpy
import scipy
import scipy.stats
import scipy.interpolate
import collections
from functools import reduce
def getSignificance(pvalue, thresholds=[0.05, 0.01, 0.001]):
"""return cartoon of significance of a p-Value."""
n = 0
for x in thresholds:
if pvalue > x:
return "*" * n
n += 1
return "*" * n
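# Illustrative sketch (added, not in the original module): with the default
# thresholds, getSignificance maps 0.2 -> "", 0.03 -> "*", 0.005 -> "**" and
# 0.0005 -> "***". The p-values below are made up.
def _demo_get_significance():
    return [getSignificance(p) for p in (0.2, 0.03, 0.005, 0.0005)]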
class Result(object):
'''allow both member and dictionary access.'''
slots = ("_data")
def __init__(self):
object.__setattr__(self, "_data", dict())
def __getattr__(self, key):
if not key.startswith("_"):
try:
return object.__getattribute__(self, "_data")[key]
except KeyError:
pass
return getattr(self._data, key)
def keys(self):
return list(self._data.keys())
def values(self):
return list(self._data.values())
def __iter__(self):
return self._data.__iter__()
def __len__(self):
return self._data.__len__()
def __str__(self):
return str(self._data)
def __contains__(self, key):
return key in self._data
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setitem__(self, key, value):
self._data[key] = value
def __setattr__(self, key, value):
if not key.startswith("_"):
self._data[key] = value
else:
object.__setattr__(self, key, value)
def __getstate__(self):
# required for correct pickling/unpickling
return object.__getattribute__(self, "_data")
def __setstate__(self, d):
# required for correct unpickling, otherwise
# maximum recursion threshold will be reached
object.__setattr__(self, "_data", d)
class LogLikelihoodTest:
def __init__(self):
pass
def doLogLikelihoodTest(complex_ll, complex_np,
simple_ll, simple_np,
significance_threshold=0.05):
"""perform log-likelihood test between model1 and model2.
"""
    assert complex_ll >= simple_ll, "log likelihood of complex model smaller than for simple model: %f < %f" % (
        complex_ll, simple_ll)
chi = 2 * (complex_ll - simple_ll)
df = complex_np - simple_np
if df <= 0:
raise ValueError("difference of degrees of freedom not larger than 0")
p = scipy.stats.distributions.chi2.sf(chi, df)
l = LogLikelihoodTest()
l.mComplexLogLikelihood = complex_ll
l.mSimpleLogLikelihood = simple_ll
l.mComplexNumParameters = complex_np
l.mSimpleNumParameters = simple_np
l.mSignificanceThreshold = significance_threshold
l.mProbability = p
l.mChiSquaredValue = chi
l.mDegreesFreedom = df
if p < significance_threshold:
l.mPassed = True
else:
l.mPassed = False
return l
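# Illustrative sketch (added): a likelihood-ratio test between a nested pair of
# models -- a 5-parameter "complex" model and a 3-parameter "simple" one. The
# log-likelihood values are invented for demonstration only.
def _demo_log_likelihood_test():
    test = doLogLikelihoodTest(complex_ll=-120.0, complex_np=5,
                               simple_ll=-130.0, simple_np=3)
    # chi^2 = 2 * (-120 - (-130)) = 20 on 2 degrees of freedom
    return test.mPassed, test.mProbability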
class FDRResult:
def __init__(self):
pass
class BinomialTest:
def __init__(self):
pass
def doBinomialTest(p, sample_size, observed, significance_threshold=0.05):
"""perform a binomial test.
Given are p: the probability of the NULL hypothesis, the sample_size
and the number of observed counts.
"""
class ChiSquaredTest:
def __init__(self):
pass
def doChiSquaredTest(matrix, significance_threshold=0.05):
'''perform chi-squared test on a matrix.
The observed/expected values are in rows, the categories are in
columns, for example:
+---------+--------------+--------+----------+
|set |protein_coding|intronic|intergenic|
+---------+--------------+--------+----------+
|observed |92 |90 |194 |
+---------+--------------+--------+----------+
|expected |91 |10 |15 |
+---------+--------------+--------+----------+
If there are only two categories (one degrees of freedom) the
Yates correction is applied. For each entry (observed-expected),
the value 0.5 is subtracted ignoring the sign of the difference.
The test throws an exception if
1. one or more expected categories are less than 1 (it does not
matter what the observed values are)
2. more than one-fifth of expected categories are less than 5
'''
nrows, ncols = matrix.shape
if nrows != 2:
raise NotImplementedError(
"chi-square currently only implemented for 2xn tables.")
n = 0
for x in range(ncols):
if matrix[1][x] < 1:
raise ValueError("matrix contains expected counts < 1")
if matrix[1][x] < 5:
n += 1
if 100.0 * n / ncols > 20.0:
raise ValueError(
"more than 20% of expected categories are less than 5")
row_sums = [sum(matrix[x, :]) for x in range(nrows)]
col_sums = [sum(matrix[:, x]) for x in range(ncols)]
sample_size = float(sum(row_sums))
chi = 0.0
df = (nrows - 1) * (ncols - 1)
# Yates correction applies for a 2x2 table only (df==1)
if df == 1:
correction = 0.5 * 0.5
else:
correction = 0
for x in range(nrows):
for y in range(ncols):
expected = row_sums[x] * col_sums[y] / sample_size
# compute difference and apply Yates correction
d = abs(matrix[x, y] - expected) - correction
chi += (d * d) / expected
result = ChiSquaredTest()
    result.mProbability = scipy.stats.chi2.sf(chi, df)
result.mDegreesFreedom = df
result.mChiSquaredValue = chi
result.mPassed = result.mProbability < significance_threshold
result.mSignificance = getSignificance(result.mProbability)
result.mSampleSize = sample_size
result.mPhi = math.sqrt(result.mChiSquaredValue / result.mSampleSize)
return result
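# Illustrative sketch (added): the observed/expected table from the docstring
# above, fed to doChiSquaredTest as a 2x3 matrix (row 0 observed counts,
# row 1 expected counts).
def _demo_chi_squared_test():
    matrix = numpy.array([[92.0, 90.0, 194.0],
                          [91.0, 10.0, 15.0]])
    result = doChiSquaredTest(matrix)
    return result.mChiSquaredValue, result.mProbability, result.mPassed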
def doPearsonChiSquaredTest(p, sample_size, observed,
significance_threshold=0.05):
"""perform a pearson chi squared test.
Given are p: the probability of the NULL hypothesis, the sample_size
and the number of observed counts.
For large sample sizes, this test is a continuous approximation to
the binomial test.
"""
e = float(p) * sample_size
d = float(observed) - e
chi = d * d / e
df = 1
result = ChiSquaredTest()
    result.mProbability = scipy.stats.chi2.sf(chi, df)
result.mDegreesFreedom = df
result.mChiSquaredValue = chi
result.mPassed = result.mProbability < significance_threshold
result.mSignificance = getSignificance(result.mProbability)
result.mSampleSize = sample_size
result.mPhi = math.sqrt(result.mChiSquaredValue / result.mSampleSize)
result.mObserved = observed
result.mExpected = e
return result
class DistributionalParameters:
"""a collection of distributional parameters. Available properties
are:
mMean, mMedian, mMin, mMax, mSampleStd, mSum, mCounts
This method is deprecated - use :class:`Summary` instead.
"""
def __init__(self, values=None, format="%6.4f", mode="float"):
self.mMean, self.mMedian, self.mMin, self.mMax, self.mSampleStd, self.mSum, self.mCounts, self.mQ1, self.mQ3 = \
(0, 0, 0, 0, 0, 0, 0, 0, 0)
if values is not None and len(values) > 0:
self.updateProperties(values)
self.mFormat = format
self.mMode = mode
self.mNErrors = 0
def updateProperties(self, values):
"""update properties.
If values is an vector of strings, each entry will be converted
to float. Entries that can not be converted are ignored.
"""
values = [x for x in values if x is not None]
if len(values) == 0:
raise ValueError("no data for statistics")
# convert
self.mNErrors = 0
if type(values[0]) not in (int, float):
n = []
for x in values:
try:
n.append(float(x))
except ValueError:
self.mNErrors += 1
else:
n = values
if len(n) == 0:
raise ValueError("no data for statistics")
# use a non-sort algorithm later.
n.sort()
        self.mQ1 = n[len(n) // 4]
        self.mQ3 = n[len(n) * 3 // 4]
self.mCounts = len(n)
self.mMin = min(n)
self.mMax = max(n)
self.mMean = scipy.mean(n)
self.mMedian = scipy.median(n)
self.mSampleStd = scipy.std(n)
self.mSum = reduce(lambda x, y: x + y, n)
def getZScore(self, value):
"""return zscore for value."""
if self.mSampleStd > 0:
return (value - self.mMean) / self.mSampleStd
else:
return 0
def setFormat(self, format):
"""set number format."""
self.mFormat = format
def getHeaders(self):
"""returns header of column separated values."""
return ("nval", "min", "max", "mean", "median",
"stddev", "sum", "q1", "q3")
def getHeader(self):
"""returns header of column separated values."""
return "\t".join(self.getHeaders())
def items(self):
return [(x, self.__getitem__(x)) for x in self.getHeaders()]
def __getitem__(self, key):
if key == "nval":
return self.mCounts
if key == "min":
return self.mMin
if key == "max":
return self.mMax
if key == "mean":
return self.mMean
if key == "median":
return self.mMedian
if key == "stddev":
return self.mSampleStd
if key == "sum":
return self.mSum
if key == "q1":
return self.mQ1
if key == "q3":
return self.mQ3
raise KeyError(key)
def __str__(self):
"""return string representation of data."""
if self.mMode == "int":
format_vals = "%i"
format_median = "%.1f"
else:
format_vals = self.mFormat
format_median = self.mFormat
return "\t".join(("%i" % self.mCounts,
format_vals % self.mMin,
format_vals % self.mMax,
self.mFormat % self.mMean,
format_median % self.mMedian,
self.mFormat % self.mSampleStd,
format_vals % self.mSum,
format_vals % self.mQ1,
format_vals % self.mQ3,
))
class Summary(Result):
"""a collection of distributional parameters. Available properties
are:
mean, median, min, max, samplestd, sum, counts
"""
fields = ("nval", "min", "max", "mean",
"median", "stddev", "sum", "q1", "q3")
def __init__(self, values=None,
format="%6.4f", mode="float",
allow_empty=True):
Result.__init__(self)
self._format = format
self._mode = mode
        # note that this determines the order of the fields at output
self.counts, self.min, self.max, self.mean, self.median, self.samplestd, self.sum, self.q1, self.q3 = \
(0, 0, 0, 0, 0, 0, 0, 0, 0)
if values is not None:
values = [x for x in values if x is not None]
if len(values) == 0:
if allow_empty:
return
else:
raise ValueError("no data for statistics")
# convert
self._nerrors = 0
if type(values[0]) not in (int, float):
n = []
for x in values:
try:
n.append(float(x))
except ValueError:
self._nerrors += 1
else:
n = values
# use a non-sort algorithm?
n.sort()
if len(n):
self.q1 = n[len(n) // 4]
self.q3 = n[len(n) * 3 // 4]
else:
self.q1 = self.q3 = 0
self.counts = len(n)
self.min = min(n)
self.max = max(n)
self.mean = scipy.mean(n)
self.median = scipy.median(n)
self.samplestd = scipy.std(n)
self.sum = reduce(lambda x, y: x + y, n)
def getHeaders(self):
"""returns header of column separated values."""
return self.fields
def getHeader(self):
"""returns header of column separated values."""
return "\t".join(self.getHeaders())
def __str__(self):
"""return string representation of data."""
if self._mode == "int":
format_vals = "%i"
format_median = "%.1f"
else:
format_vals = self._format
format_median = self._format
return "\t".join(("%i" % self.counts,
format_vals % self.min,
format_vals % self.max,
self._format % self.mean,
format_median % self.median,
self._format % self.samplestd,
format_vals % self.sum,
format_vals % self.q1,
format_vals % self.q3,
))
def doFDRPython(pvalues,
vlambda=None,
pi0_method="smoother",
fdr_level=None,
robust=False,
smooth_df=3,
smooth_log_pi0=False,
pi0=None,
plot=False):
"""modeled after code taken from
http://genomics.princeton.edu/storeylab/qvalue/linux.html.
I did not like the error handling so I translated most to python.
Compute FDR after method by <NAME> al. (2002).
"""
if min(pvalues) < 0 or max(pvalues) > 1:
raise ValueError("p-values out of range")
# set to default of qvalue method
if vlambda is None:
vlambda = numpy.arange(0, 0.95, 0.05)
m = len(pvalues)
    pvalues = numpy.array(pvalues, dtype=float)
if pi0 is None:
if type(vlambda) == float:
vlambda = (vlambda,)
if len(vlambda) > 1 and len(vlambda) < 4:
raise ValueError(
"if length of vlambda greater than 1, you need at least 4 values.")
if len(vlambda) > 1 and (min(vlambda) < 0 or max(vlambda) >= 1):
raise ValueError("vlambda must be within [0, 1).")
# estimate pi0
if len(vlambda) == 1:
vlambda = vlambda[0]
if vlambda < 0 or vlambda >= 1:
raise ValueError("vlambda must be within [0, 1).")
pi0 = numpy.mean([x >= vlambda for x in pvalues]) / (1.0 - vlambda)
pi0 = min(pi0, 1.0)
else:
            pi0 = numpy.zeros(len(vlambda), float)
for i in range(len(vlambda)):
pi0[i] = numpy.mean([x >= vlambda[i]
for x in pvalues]) / (1.0 - vlambda[i])
if pi0_method == "smoother":
if smooth_log_pi0:
pi0 = math.log(pi0)
tck = scipy.interpolate.splrep(vlambda,
pi0,
k=smooth_df,
s=10000)
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(vlambda, pi0)
x2 = numpy.arange(0, 1, 0.001)
y2 = scipy.interpolate.splev(x2, tck)
plt.plot(x2, y2)
plt.show()
pi0 = scipy.interpolate.splev(max(vlambda), tck)
if smooth_log_pi0:
pi0 = math.exp(pi0)
elif pi0_method == "bootstrap":
minpi0 = min(pi0)
                mse = numpy.zeros(len(vlambda), float)
                pi0_boot = numpy.zeros(len(vlambda), float)
for i in range(100):
# sample pvalues
idx_boot = numpy.random.random_integers(0, m - 1, m)
pvalues_boot = pvalues[idx_boot]
for x in range(len(vlambda)):
# compute number of pvalues larger than lambda[x]
pi0_boot[x] = numpy.mean(
pvalues_boot > vlambda[x]) / (1.0 - vlambda[x])
mse += (pi0_boot - minpi0) ** 2
pi0 = min(pi0[mse == min(mse)])
else:
raise ValueError(
"'pi0_method' must be one of 'smoother' or 'bootstrap'.")
pi0 = min(pi0, 1.0)
if pi0 <= 0:
raise ValueError(
"The estimated pi0 <= 0. Check that you have valid p-values "
"or use another vlambda method.")
if fdr_level is not None and (fdr_level <= 0 or fdr_level > 1):
raise ValueError("'fdr_level' must be within (0, 1].")
# compute qvalues
idx = numpy.argsort(pvalues)
# monotonically decreasing bins, so that bins[i-1] > x >= bins[i]
bins = numpy.unique(pvalues)[::-1]
# v[i] = number of observations less than or equal to pvalue[i]
# could this be done more elegantly?
val2bin = len(bins) - numpy.digitize(pvalues, bins)
    v = numpy.zeros(m, dtype=int)
lastbin = None
for x in range(m - 1, -1, -1):
bin = val2bin[idx[x]]
if bin != lastbin:
c = x
v[idx[x]] = c + 1
lastbin = bin
qvalues = pvalues * pi0 * m / v
if robust:
qvalues /= (1.0 - (1.0 - pvalues) ** m)
# bound qvalues by 1 and make them monotonic
qvalues[idx[m - 1]] = min(qvalues[idx[m - 1]], 1.0)
for i in range(m - 2, -1, -1):
qvalues[idx[i]] = min(min(qvalues[idx[i]], qvalues[idx[i + 1]]), 1.0)
result = FDRResult()
result.mQValues = qvalues
if fdr_level is not None:
result.mPassed = [x <= fdr_level for x in result.mQValues]
else:
result.mPassed = [False for x in result.mQValues]
result.mPValues = pvalues
result.mPi0 = pi0
result.mLambda = vlambda
result.xvalues = qvalues
return result
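# Illustrative sketch (added): q-value estimation on a small synthetic vector
# of p-values -- a handful of strong signals mixed into uniform noise. The
# numbers are arbitrary; with fdr_level set, result.mPassed flags the calls.
def _demo_fdr():
    numpy.random.seed(0)
    pvals = list(numpy.concatenate([numpy.random.uniform(size=50),
                                    [1e-4, 5e-4, 1e-3, 2e-3, 5e-3]]))
    result = doFDRPython(pvals, fdr_level=0.05)
    return result.mPi0, sum(result.mPassed)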
class CorrelationTest:
'''coefficient is r, not r squared'''
def __init__(self,
s_result=None,
method=None):
self.mPValue = None
self.mMethod = None
if s_result:
self.mCoefficient = s_result[0]
self.mPValue = s_result[1]
self.mNObservations = 0
self.mAlternative = "two-sided"
else:
self.mCoefficient = 0
self.mPValue = 1
self.mSignificance = "na"
self.mNObservations = 0
self.mAlternative = "na"
self.mMethod = "na"
if method:
self.mMethod = method
if self.mPValue is not None:
self.mSignificance = getSignificance(self.mPValue)
def __str__(self):
return "\t".join((
"%6.4f" % self.mCoefficient,
"%e" % self.mPValue,
self.mSignificance,
"%i" % self.mNObservations,
self.mMethod,
self.mAlternative))
@classmethod
def getHeaders(cls):
return ("coeff", "pvalue", "significance", "observations",
"method", "alternative")
def filterMasked(xvals, yvals, missing=("na", "Nan", None, ""),
                 dtype=float):
"""convert xvals and yvals to numpy array skipping pairs with
one or more missing values."""
xmask = [i in missing for i in xvals]
ymask = [i in missing for i in yvals]
return (numpy.array([xvals[i] for i in range(len(xvals))
if not xmask[i]], dtype=dtype),
numpy.array([yvals[i] for i in range(len(yvals))
if not ymask[i]], dtype=dtype))
def doCorrelationTest(xvals, yvals):
"""compute correlation between x and y.
Raises a value-error if there are not enough observations.
"""
if len(xvals) <= 1 or len(yvals) <= 1:
raise ValueError("can not compute correlation with no data")
if len(xvals) != len(yvals):
raise ValueError("data vectors have unequal length")
x, y = filterMasked(xvals, yvals)
result = CorrelationTest(s_result=scipy.stats.pearsonr(x, y),
method="pearson")
result.mNObservations = len(x)
return result
def getPooledVariance(data):
"""return pooled variance from a
list of tuples (sample_size, variance)."""
t, var = 0, 0
for n, s in data:
t += n
var += (n - 1) * s
assert t > len(data), "sample size smaller than samples combined"
return var / float(t - len(data))
def computeROC(values):
'''return a roc curve for *values*. Values
is a sorted list of (value, bool) pairs.
Deprecated - use getPerformance instead
returns a list of (FPR,TPR) tuples.
'''
roc = []
npositives = len([x for x in values if x[1]])
if npositives == 0:
raise ValueError("no positives among values")
ntotal = len(values)
last_value, last_fpr = None, None
tp, fp = 0, 0
tn, fn = ntotal - npositives, npositives
for value, is_positive in values:
if is_positive:
tp += 1
fn -= 1
else:
fp += 1
tn -= 1
if last_value != value:
try:
tpr = float(tp) / (tp + fn)
except ZeroDivisionError:
tpr = 0
try:
fpr = float(fp) / (fp + tn)
except ZeroDivisionError:
fpr = 0
if last_fpr != fpr:
roc.append((fpr, tpr))
last_fpr = fpr
        last_value = value
return roc
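# Illustrative sketch (added): ROC points for a toy ranking. The list must be
# sorted by score; the boolean marks whether the item is a true positive.
def _demo_compute_roc():
    values = [(0.9, True), (0.8, True), (0.7, False),
              (0.6, True), (0.5, False), (0.4, False)]
    return computeROC(values)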
class TTest:
def __init__(self):
pass
class WelchTTest:
def __init__(self):
pass
PairedTTest = collections.namedtuple("PairedTTest", "statistic pvalue")
def doPairedTTest(vals1, vals2):
'''perform paired t-test.
vals1 and vals2 need to contain the same number of elements.
'''
return PairedTTest._make(scipy.stats.ttest_rel(vals1, vals2))
def doWelchsTTest(n1, mean1, std1,
n2, mean2, std2,
alpha=0.05):
    '''Welch's approximate t-test for the difference of two means of
    heteroscedastic populations.
    This function does a two-tailed test.
see PMID: 12016052
:Parameters:
n1 : int
number of variates in sample 1
n2 : int
number of variates in sample 2
mean1 : float
mean of sample 1
mean2 : float
mean of sample 2
std1 : float
standard deviation of sample 1
std2 : float
standard deviation of sample 2
returns a WelchTTest
'''
if std1 == 0 and std2 == 0:
raise ValueError('standard deviations are 0.')
# convert standard deviation to sample variance
svar1 = std1 ** 2 * n1 / float(n1 - 1)
svar2 = std2 ** 2 * n2 / float(n2 - 1)
# compute df and test statistic
df = ((svar1 / n1 + svar2 / n2) ** 2) / \
(((svar1 / n1) ** 2) / (n1 - 1) + ((svar2 / n2) ** 2) / (n2 - 1))
denom = numpy.sqrt(svar1 / n1 + svar2 / n2)
z = abs(mean1 - mean2) / denom
# do the test
pvalue = 2 * scipy.stats.t.sf(z, df)
result = WelchTTest()
result.mPValue = pvalue
result.mDegreesFreedom = df
result.mZ = z
result.mMean1 = mean1
result.mMean2 = mean2
result.mSampleVariance1 = svar1
result.mSampleVariance2 = svar2
result.mDifference = mean1 - mean2
result.mZLower = scipy.stats.t.ppf(alpha, df)
result.mZUpper = scipy.stats.t.ppf(1.0 - alpha, df)
result.mDifferenceLower = result.mZLower * denom
result.mDifferenceUpper = result.mZUpper * denom
return result
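# Illustrative sketch (added): Welch's t-test from summary statistics of two
# made-up samples with unequal variances and sizes.
def _demo_welchs_ttest():
    result = doWelchsTTest(n1=25, mean1=10.0, std1=2.0,
                           n2=30, mean2=11.5, std2=4.0)
    return result.mPValue, result.mDegreesFreedom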
def getAreaUnderCurve(xvalues, yvalues):
'''compute area under curve from a set of discrete x,y coordinates
using trapezoids.
This is only as accurate as the density of points.
'''
assert len(xvalues) == len(yvalues)
last_x, last_y = xvalues[0], yvalues[0]
auc = 0
    for x, y in list(zip(xvalues, yvalues))[1:]:
dx = x - last_x
assert not dx <= 0, "x not increasing: %f >= %f" % (last_x, x)
dy = abs(last_y - y)
my = min(last_y, y)
# rectangle plus triangle
auc += dx * my + dx * dy / 2
last_x, last_y = x, y
return auc
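# Illustrative sketch (added): trapezoidal integration of y = x on [0, 1]
# sampled at five points; the exact area is 0.5.
def _demo_area_under_curve():
    xvalues = [0.0, 0.25, 0.5, 0.75, 1.0]
    yvalues = [0.0, 0.25, 0.5, 0.75, 1.0]
    return getAreaUnderCurve(xvalues, yvalues)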
def getSensitivityRecall(values):
'''return sensitivity/selectivity.
Values is a sorted list of (value, bool) pairs.
Deprecated - use getPerformance instead
'''
npositives = 0.0
npredicted = 0.0
l = None
result = []
total = float(len(values))
for value, is_positive in values:
npredicted += 1.0
if is_positive > 0:
npositives += 1.0
if value != l:
result.append((value, npositives / npredicted, npredicted / total))
l = value
if l:
result.append((l, npositives / npredicted, npredicted / total))
return result
ROCResult = collections.namedtuple("ROCResult",
"value pred tp fp tn fn tpr fpr tnr fnr rtpr rfnr")
def getPerformance(values,
skip_redundant=True,
false_negatives=False,
bin_by_value=True,
monotonous=False,
multiple=False,
increasing=True,
total_positives=None,
total_false_negatives=None,
):
'''compute performance estimates for a list of ``(score, flag)``
tuples in *values*.
Values is a sorted list of (value, bool) pairs.
If the option *false-negative* is set, the input is +/- or 1/0 for
a true positive or false negative, respectively.
TP: true positives
FP: false positives
TPR: true positive rate = true_positives / predicted
P: predicted
FPR: false positive rate = false positives / predicted
value: value
'''
true_positives = 0
predicted = 0
last_value = None
binned_values = []
for value, flag in values:
if not bin_by_value:
if last_value != value:
binned_values.append((true_positives, predicted, value))
else:
if last_value is not None and last_value != value:
binned_values.append((true_positives, predicted, last_value))
predicted += 1
if flag:
true_positives += 1
last_value = value
binned_values.append((true_positives, predicted, last_value))
binned_values.append((true_positives, predicted, value))
if true_positives == 0:
raise ValueError("# no true positives!")
if total_positives is None:
if total_false_negatives:
positives = float(predicted)
else:
positives = float(true_positives)
else:
positives = float(total_positives)
last_positives = None
last_tpr = None
result = []
for true_positives, predicted, value in binned_values:
if (predicted == 0):
predicted = 1
if total_false_negatives:
false_negatives = predicted - true_positives
false_positives = 0
true_negatives = 0
else:
true_negatives = 0
false_negatives = positives - true_positives
false_positives = predicted - true_positives
tpr = float(true_positives) / predicted
fpr = float(false_positives) / (true_positives + false_negatives)
fnr = float(false_negatives) / positives
tnr = 0
# relative rates
rfpr = float(false_positives) / predicted
rfnr = float(false_negatives) / predicted
if monotonous and last_tpr and last_tpr < tpr:
continue
if skip_redundant and true_positives == last_positives:
continue
if (predicted > 0):
result.append(ROCResult._make(
(value,
predicted,
true_positives,
false_positives,
true_negatives,
false_negatives,
tpr, fpr, tnr, fnr,
rfpr, rfnr)))
last_positives = true_positives
last_tpr = tpr
return result
def doMannWhitneyUTest(xvals, yvals):
'''apply the Mann-Whitney U test to test for the difference of medians.'''
raise NotImplementedError()
def adjustPValues(pvalues, method='fdr', n=None):
'''returns an array of adjusted pvalues
Reimplementation of p.adjust in the R package.
    p: numeric vector of p-values (possibly with 'NA's). Any other
    R object is coerced by 'as.numeric'.
method: correction method. Valid values are:
n: number of comparisons, must be at least 'length(p)'; only set
this (to non-default) when you know what you are doing
For more information, see the documentation of the
p.adjust method in R.
'''
if n is None:
n = len(pvalues)
if method == "fdr":
method = "BH"
# optional, remove NA values
    p = numpy.array(pvalues, dtype=float)
lp = len(p)
assert n <= lp
if n <= 1:
return p
if n == 2 and method == "hommel":
method = "hochberg"
if method == "bonferroni":
p0 = n * p
elif method == "holm":
i = numpy.arange(lp)
o = numpy.argsort(p)
ro = numpy.argsort(o)
m = numpy.maximum.accumulate((n - i) * p[o])
p0 = m[ro]
elif method == "hommel":
raise NotImplementedError("hommel method not implemented")
# if (n > lp) p <- c(p, rep.int(1, n - lp))
# i = numpy.arange(n)
# o = numpy.argsort(p)
# p = p[o]
# ro = numpy.argsort(o)
#
# q <- pa <- rep.int(min(n * p/i), n)
# for (j in (n - 1):2) {
# ij <- seq_len(n - j + 1)
# i2 <- (n - j + 2):n
# q1 <- min(j * p[i2]/(2:j))
# q[ij] <- pmin(j * p[ij], q1)
# q[i2] <- q[n - j + 1]
# pa <- pmax(pa, q)
# }
# pmax(pa, p)[if (lp < n) ro[1:lp] else ro]
elif method == "hochberg":
i = numpy.arange(0, lp)[::-1]
o = numpy.argsort(1 - p)
ro = numpy.argsort(o)
m = numpy.minimum.accumulate((n - i) * p[o])
p0 = m[ro]
elif method == "BH":
i = numpy.arange(1, lp + 1)[::-1]
o = | numpy.argsort(1 - p) | numpy.argsort |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/models.common.ipynb (unless otherwise specified).
__all__ = ['Conv1d', 'LinearNorm', 'LocationLayer', 'Attention', 'STFT', 'MelSTFT', 'ReferenceEncoder',
'MultiHeadAttention', 'STL', 'GST', 'LayerNorm', 'Flip', 'Log', 'ElementwiseAffine', 'DDSConv', 'ConvFlow',
'WN', 'ResidualCouplingLayer', 'ResBlock1', 'ResBlock2', 'LRELU_SLOPE']
# Cell
import numpy as np
from scipy.signal import get_window
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, weight_norm
from librosa.filters import mel as librosa_mel
from librosa.util import pad_center, tiny
from ..utils.utils import *
from ..vendor.tfcompat.hparam import HParams
# Cell
class Conv1d(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain="linear",
):
super().__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
nn.init.xavier_uniform_(
self.conv.weight, gain=nn.init.calculate_gain(w_init_gain)
)
def forward(self, signal):
return self.conv(signal)
# Cell
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain="linear"):
super().__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight, gain=torch.nn.init.calculate_gain(w_init_gain)
)
def forward(self, x):
return self.linear_layer(x)
# Cell
from numpy import finfo
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size, attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = Conv1d(
2,
attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding,
bias=False,
stride=1,
dilation=1,
)
self.location_dense = LinearNorm(
attention_n_filters, attention_dim, bias=False, w_init_gain="tanh"
)
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(
self,
attention_rnn_dim,
embedding_dim,
attention_dim,
attention_location_n_filters,
attention_location_kernel_size,
fp16_run,
):
super(Attention, self).__init__()
self.query_layer = LinearNorm(
attention_rnn_dim, attention_dim, bias=False, w_init_gain="tanh"
)
self.memory_layer = LinearNorm(
embedding_dim, attention_dim, bias=False, w_init_gain="tanh"
)
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(
attention_location_n_filters, attention_location_kernel_size, attention_dim
)
if fp16_run:
self.score_mask_value = finfo("float16").min
else:
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory, attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(
torch.tanh(processed_query + processed_attention_weights + processed_memory)
)
energies = energies.squeeze(-1)
return energies
def forward(
self,
attention_hidden_state,
memory,
processed_memory,
attention_weights_cat,
mask,
attention_weights=None,
):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
        attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
if attention_weights is None:
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat
)
if mask is not None:
alignment.data.masked_fill_(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
# Cell
class STFT:
"""adapted from <NAME>'s https://github.com/pseeth/pytorch-stft"""
def __init__(
self,
filter_length=1024,
hop_length=256,
win_length=1024,
window="hann",
padding=None,
device="cpu",
rank=None,
):
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
self.padding = padding or (filter_length // 2)
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack(
[np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])]
)
if device == "cuda":
dev = torch.device(f"cuda:{rank}")
forward_basis = torch.cuda.FloatTensor(
fourier_basis[:, None, :], device=dev
)
inverse_basis = torch.cuda.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :].astype(np.float32),
device=dev,
)
else:
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
| np.linalg.pinv(scale * fourier_basis) | numpy.linalg.pinv |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import abc
from collections import Counter
from functools import lru_cache, reduce
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple
# Third-party imports
import numpy as np
import pandas as pd
# First-party imports
from gluonts.core.component import DType, validated
from gluonts.core.exception import GluonTSDateBoundsError, assert_data_error
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.dataset.stat import ScaleHistogram
from gluonts.time_feature import TimeFeature
def serialize_data_entry(data_entry: DataEntry) -> Dict:
"""
Encode the numpy values in the a DataEntry dictionary into lists so the
dictionary can be json serialized.
"""
def serialize_field(field):
if isinstance(field, np.ndarray):
# circumvent https://github.com/micropython/micropython/issues/3511
nan_ix = np.isnan(field)
field = field.astype(np.object_)
field[nan_ix] = "NaN"
return field.tolist()
return str(field)
return {
k: serialize_field(v)
for k, v in data_entry.data.items()
if v is not None
}
class FieldName:
"""
A bundle of default field names to be used by clients when instantiating
transformer instances.
"""
START = 'start'
TARGET = 'target'
FEAT_STATIC_CAT = 'feat_static_cat'
FEAT_STATIC_REAL = 'feat_static_real'
FEAT_DYNAMIC_CAT = 'feat_dynamic_cat'
FEAT_DYNAMIC_REAL = 'feat_dynamic_real'
FEAT_TIME = 'time_feat'
FEAT_CONST = 'feat_dynamic_const'
FEAT_AGE = 'feat_dynamic_age'
OBSERVED_VALUES = 'observed_values'
IS_PAD = 'is_pad'
FORECAST_START = 'forecast_start'
def compute_date(ts: pd.Timestamp, offset: int) -> pd.Timestamp:
"""
Computes an offsetted timestamp.
Basic wrapping around pandas `ts + offset` with caching and exception handling.
"""
return _compute_date_helper(ts, ts.freq, offset)
@lru_cache(maxsize=10000)
def _compute_date_helper(ts, freq, offset):
"""
We are using this helper function which explicitly uses the frequency as a
parameter, because the frequency is not included in the hash of a time
stamp.
I.e.
pd.Timestamp(x, freq='1D') and pd.Timestamp(x, freq='1min')
hash to the same value
"""
try:
# this line looks innocent, but can create a date which is out of
# bounds values over year 9999 raise a ValueError
# values over 2262-04-11 raise a pandas OutOfBoundsDatetime
return ts + offset * ts.freq
except (ValueError, pd._libs.OutOfBoundsDatetime) as ex:
raise GluonTSDateBoundsError(ex)
def target_transformation_length(
target: np.array, pred_length: int, is_train: bool
) -> int:
return target.shape[-1] + (0 if is_train else pred_length)
class InstanceSampler:
"""
An InstanceSampler is called with the time series and the valid
index bounds a, b and should return a set of indices a <= i <= b
at which training instances will be generated.
The object should be called with:
Parameters
----------
ts
target that should be sampled with shape (dim, seq_len)
a
first index of the target that can be sampled
b
last index of the target that can be sampled
Returns
-------
np.ndarray
Selected points to sample
"""
def __call__(self, ts: np.ndarray, a: int, b: int) -> np.ndarray:
raise NotImplementedError()
class UniformSplitSampler(InstanceSampler):
"""
Samples each point with the same fixed probability.
Parameters
----------
p
Probability of selecting a time point
"""
@validated()
def __init__(self, p: float = 1.0 / 20.0) -> None:
self.p = p
self.lookup = np.arange(2 ** 13)
def __call__(self, ts: np.ndarray, a: int, b: int) -> np.ndarray:
assert a <= b
while ts.shape[-1] >= len(self.lookup):
self.lookup = np.arange(2 * len(self.lookup))
mask = np.random.uniform(low=0.0, high=1.0, size=b - a + 1) < self.p
return self.lookup[a : a + len(mask)][mask]
class TestSplitSampler(InstanceSampler):
"""
Sampler used for prediction. Always selects the last time point for
splitting i.e. the forecast point for the time series.
"""
@validated()
def __init__(self) -> None:
pass
def __call__(self, ts: np.ndarray, a: int, b: int) -> np.ndarray:
return np.array([b])
class ExpectedNumInstanceSampler(InstanceSampler):
"""
Keeps track of the average time series length and adjusts the probability
per time point such that on average `num_instances` training examples are
generated per time series.
Parameters
----------
num_instances
number of training examples generated per time series on average
"""
@validated()
def __init__(self, num_instances: float) -> None:
self.num_instances = num_instances
self.avg_length = 0.0
self.n = 0.0
self.lookup = np.arange(2 ** 13)
def __call__(self, ts: np.ndarray, a: int, b: int) -> np.ndarray:
while ts.shape[-1] >= len(self.lookup):
self.lookup = np.arange(2 * len(self.lookup))
self.n += 1.0
self.avg_length += float(b - a + 1 - self.avg_length) / float(self.n)
p = self.num_instances / self.avg_length
mask = np.random.uniform(low=0.0, high=1.0, size=b - a + 1) < p
indices = self.lookup[a : a + len(mask)][mask]
return indices
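# Illustrative sketch (added, not part of the library): sampling split points
# from a toy univariate target so that, on average, two training instances are
# generated per series. The array shape and bounds are arbitrary.
def _demo_expected_num_instance_sampler():
    sampler = ExpectedNumInstanceSampler(num_instances=2.0)
    ts = np.zeros((1, 100))
    return sampler(ts, a=10, b=90)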
class BucketInstanceSampler(InstanceSampler):
"""
    This sampler can be used when working with a set of time series that have
    a skewed distribution. For instance, if the dataset contains many time series
with small values and few with large values.
The probability of sampling from bucket i is the inverse of its number of elements.
Parameters
----------
scale_histogram
The histogram of scale for the time series. Here scale is the mean abs
value of the time series.
"""
@validated()
def __init__(self, scale_histogram: ScaleHistogram) -> None:
# probability of sampling a bucket i is the inverse of its number of
# elements
self.scale_histogram = scale_histogram
self.lookup = np.arange(2 ** 13)
def __call__(self, ts: np.ndarray, a: int, b: int) -> None:
while ts.shape[-1] >= len(self.lookup):
self.lookup = np.arange(2 * len(self.lookup))
p = 1.0 / self.scale_histogram.count(ts)
mask = np.random.uniform(low=0.0, high=1.0, size=b - a + 1) < p
indices = self.lookup[a : a + len(mask)][mask]
return indices
class Transformation(metaclass=abc.ABCMeta):
"""
Base class for all Transformations.
    A Transformation works on a stream (iterator) of dictionaries.
"""
@abc.abstractmethod
def __call__(
self, data_it: Iterator[DataEntry], is_train: bool
) -> Iterator[DataEntry]:
pass
def estimate(self, data_it: Iterator[DataEntry]) -> Iterator[DataEntry]:
return data_it # default is to pass through without estimation
class Chain(Transformation):
"""
Chain multiple transformations together.
"""
@validated()
def __init__(self, trans: List[Transformation]) -> None:
self.trans = trans
def __call__(
self, data_it: Iterator[DataEntry], is_train: bool
) -> Iterator[DataEntry]:
tmp = data_it
for t in self.trans:
tmp = t(tmp, is_train)
return tmp
def estimate(self, data_it: Iterator[DataEntry]) -> Iterator[DataEntry]:
return reduce(lambda x, y: y.estimate(x), self.trans, data_it)
class Identity(Transformation):
def __call__(
self, data_it: Iterator[DataEntry], is_train: bool
) -> Iterator[DataEntry]:
return data_it
class MapTransformation(Transformation):
"""
Base class for Transformations that returns exactly one result per input in the stream.
"""
def __call__(
self, data_it: Iterator[DataEntry], is_train: bool
) -> Iterator:
for data_entry in data_it:
try:
yield self.map_transform(data_entry.copy(), is_train)
except Exception as e:
raise e
@abc.abstractmethod
def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
pass
class SimpleTransformation(MapTransformation):
"""
Element wise transformations that are the same in train and test mode
"""
def map_transform(self, data: DataEntry, is_train: bool) -> DataEntry:
return self.transform(data)
@abc.abstractmethod
def transform(self, data: DataEntry) -> DataEntry:
pass
class AdhocTransform(SimpleTransformation):
"""
Applies a function as a transformation
This is called ad-hoc, because it is not serializable.
It is OK to use this for experiments and outside of a model pipeline that
needs to be serialized.
"""
def __init__(self, func: Callable[[DataEntry], DataEntry]) -> None:
self.func = func
def transform(self, data: DataEntry) -> DataEntry:
return self.func(data.copy())
class FlatMapTransformation(Transformation):
"""
Transformations that yield zero or more results per input, but do not combine
elements from the input stream.
"""
def __call__(
self, data_it: Iterator[DataEntry], is_train: bool
) -> Iterator:
for data_entry in data_it:
try:
for result in self.flatmap_transform(
data_entry.copy(), is_train
):
yield result
except Exception as e:
raise e
@abc.abstractmethod
def flatmap_transform(
self, data: DataEntry, is_train: bool
) -> Iterator[DataEntry]:
pass
class FilterTransformation(FlatMapTransformation):
def __init__(self, condition: Callable[[DataEntry], bool]) -> None:
self.condition = condition
def flatmap_transform(
self, data: DataEntry, is_train: bool
) -> Iterator[DataEntry]:
if self.condition(data):
yield data
class SetField(SimpleTransformation):
"""
Sets a field in the dictionary with the given value.
Parameters
----------
output_field
Name of the field that will be set
value
Value to be set
"""
@validated()
def __init__(self, output_field: str, value: Any) -> None:
self.output_field = output_field
self.value = value
def transform(self, data: DataEntry) -> DataEntry:
data[self.output_field] = self.value
return data
class SetFieldIfNotPresent(SimpleTransformation):
"""
Sets a field in the dictionary with the given value, in case it does not exist already
Parameters
----------
output_field
Name of the field that will be set
value
Value to be set
"""
@validated()
def __init__(self, field: str, value: Any) -> None:
self.output_field = field
self.value = value
def transform(self, data: DataEntry) -> DataEntry:
if self.output_field not in data.keys():
data[self.output_field] = self.value
return data
class AsNumpyArray(SimpleTransformation):
"""
Converts the value of a field into a numpy array.
Parameters
----------
expected_ndim
Expected number of dimensions. Throws an exception if the number of
dimensions does not match.
dtype
numpy dtype to use.
"""
@validated()
def __init__(
self, field: str, expected_ndim: int, dtype: DType = np.float32
) -> None:
self.field = field
self.expected_ndim = expected_ndim
self.dtype = dtype
def transform(self, data: DataEntry) -> DataEntry:
value = data[self.field]
if not isinstance(value, float):
            # this line produces "ValueError: setting an array element with a
            # sequence" on our test
# value = np.asarray(value, dtype=np.float32)
# see https://stackoverflow.com/questions/43863748/
value = np.asarray(list(value), dtype=self.dtype)
else:
# ugly: required as list conversion will fail in the case of a
# float
value = np.asarray(value, dtype=self.dtype)
assert_data_error(
value.ndim >= self.expected_ndim,
'Input for field "{self.field}" does not have the required'
'dimension (field: {self.field}, ndim observed: {value.ndim}, '
'expected ndim: {self.expected_ndim})',
value=value,
self=self,
)
data[self.field] = value
return data
class ExpandDimArray(SimpleTransformation):
"""
Expand dims in the axis specified, if the axis is not present does nothing.
(This essentially calls np.expand_dims)
Parameters
----------
field
Field in dictionary to use
axis
Axis to expand (see np.expand_dims for details)
"""
@validated()
def __init__(self, field: str, axis: Optional[int] = None) -> None:
self.field = field
self.axis = axis
def transform(self, data: DataEntry) -> DataEntry:
if self.axis is not None:
data[self.field] = np.expand_dims(data[self.field], axis=self.axis)
return data
class VstackFeatures(SimpleTransformation):
"""
Stack fields together using `np.vstack`.
Parameters
----------
output_field
Field name to use for the output
input_fields
Fields to stack together
drop_inputs
If set to true the input fields will be dropped.
"""
@validated()
def __init__(
self,
output_field: str,
input_fields: List[str],
drop_inputs: bool = True,
) -> None:
self.output_field = output_field
self.input_fields = input_fields
self.cols_to_drop = (
[]
if not drop_inputs
else [
fname for fname in self.input_fields if fname != output_field
]
)
def transform(self, data: DataEntry) -> DataEntry:
r = [data[fname] for fname in self.input_fields]
output = np.vstack(r)
data[self.output_field] = output
for fname in self.cols_to_drop:
del data[fname]
return data
class ConcatFeatures(SimpleTransformation):
"""
Concatenate values together using `np.concatenate`.
Parameters
----------
output_field
Field name to use for the output
input_fields
Fields to stack together
drop_inputs
If set to true the input fields will be dropped.
"""
@validated()
def __init__(
self,
output_field: str,
input_fields: List[str],
drop_inputs: bool = True,
) -> None:
self.output_field = output_field
self.input_fields = input_fields
self.cols_to_drop = (
[]
if not drop_inputs
else [
fname for fname in self.input_fields if fname != output_field
]
)
def transform(self, data: DataEntry) -> DataEntry:
r = [data[fname] for fname in self.input_fields]
output = np.concatenate(r)
data[self.output_field] = output
for fname in self.cols_to_drop:
del data[fname]
return data
class SwapAxes(SimpleTransformation):
"""
Apply `np.swapaxes` to fields.
Parameters
----------
fields
Field to apply to
axes
Axes to use
"""
@validated()
def __init__(self, fields: List[str], axes: Tuple[int, int]) -> None:
self.fields = fields
self.axis1, self.axis2 = axes
def transform(self, data: DataEntry) -> DataEntry:
for field in self.fields:
data[field] = self.swap(data[field])
return data
def swap(self, v):
if isinstance(v, np.ndarray):
return | np.swapaxes(v, self.axis1, self.axis2) | numpy.swapaxes |
import os
import numpy as np
import pandas as pd
import random
from scipy.stats import percentileofscore
from multiprocessing import Pool
def aikaike_criterion_rss(obs, mod, nparams):
ni = np.float(len(obs))
rss = np.sum([(oi - mi)**2 for oi, mi in zip(obs, mod)])
aic = 2 * nparams + ni * np.log(rss)
return aic
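# Illustrative sketch (added): comparing the RSS-based AIC of two candidate
# model outputs against the same observations; the lower AIC wins. All values
# below are invented.
def _demo_aikaike_criterion_rss():
    obs = [1.0, 2.0, 3.0, 4.0, 5.0]
    close_fit = [1.1, 2.0, 2.9, 4.2, 4.9]
    flat_fit = [2.0, 2.0, 2.0, 2.0, 2.0]
    return (aikaike_criterion_rss(obs, close_fit, nparams=2),
            aikaike_criterion_rss(obs, flat_fit, nparams=1))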
def aikaike_criterion_it(obs, mod, nparams, nbins=100):
bins = np.linspace(0, 1, nbins+1)
mod = (mod - np.nanmin(obs))/(np.nanmax(obs) - np.nanmin(obs))
mod = [mi if mi>0 else 0 for mi in mod]
mod = [mi if mi<1 else 1 for mi in mod]
obs = (obs - np.nanmin(obs))/(np.nanmax(obs) - np.nanmin(obs))
p_obs = np.histogramdd([obs], [bins])[0]
p_obs = p_obs / np.sum(p_obs)
p_mod = np.histogramdd([mod], [bins])[0]
p_mod = p_mod / np.sum(p_mod)
dl_score = np.sum([oi*np.log2(oi / mi) if ((mi > 0) and (oi > 0)) else 0 for oi, mi in zip(p_obs, p_mod)])
aic = 2 * nparams + 2 * dl_score
return aic
def cal_nse(obs, mod):
mo = np.nanmean(obs)
a = np.nansum([(mi - oi) ** 2 for mi, oi in zip(mod, obs)])
b = np.nansum([(oi - mo) ** 2 for oi in obs])
return 1 - a / b
def cal_mape(obs, mod):
mo = np.nanmean(obs)
ape = [np.abs(mi - oi) / mo for mi, oi in zip(mod, obs)]
return np.nanmean(ape)
def shannon_entropy(x, bins):
c = np.histogramdd(x, bins)[0]
p = c / np.sum(c)
p = p[p > 0]
h = - np.sum(p * np.log2(p))
return h
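# Illustrative sketch (added): entropy of a near-uniform sample versus a tightly
# concentrated one, discretised on the same 10-bin grid. Inputs follow the
# np.histogramdd convention used above (a list holding the coordinate array).
def _demo_shannon_entropy():
    np.random.seed(0)
    bins = [np.linspace(0, 1, 11)]
    spread_sample = [np.random.uniform(size=1000)]
    peaked_sample = [np.clip(np.random.normal(0.5, 0.02, size=1000), 0, 1)]
    # The spread-out sample should come out close to log2(10) ~ 3.3 bits.
    return shannon_entropy(spread_sample, bins), shannon_entropy(peaked_sample, bins)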
def interaction_information(mi_c, mi):
i = mi_c - mi
return i
def normalized_source_dependency(mi_s1_s2, H_s1, H_s2):
i = mi_s1_s2 / np.min([H_s1, H_s2])
return i
def redundant_information_bounds(mi_s1_tar, mi_s2_tar, interaction_info):
r_mmi = | np.min([mi_s1_tar, mi_s2_tar]) | numpy.min |
import argparse
import os
from glob import glob
import numpy as np
from copy import deepcopy
from datautil import AnnDataLoader, EntityConverter
from tqdm import tqdm
from statistics import mean
def cal_score(conf):
if conf[0][0] + conf[0][1] == 0:
recall = float("inf")
else:
recall = float(conf[0][0] / (conf[0][0] + conf[0][1]))
if conf[0][0] + conf[1][0] == 0:
precision = float("inf")
else:
precision = float(conf[0][0] / (conf[0][0] + conf[1][0]))
if recall + precision == 0:
f1 = float("inf")
else:
f1 = float(2 * recall * precision / (recall + precision))
return recall, precision, f1
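# Illustrative sketch (added): precision/recall/F1 from a small confusion
# matrix laid out the way this script accumulates it -- conf[0][0] = TP,
# conf[0][1] = FN, conf[1][0] = FP. The counts are made up.
def _demo_cal_score():
    conf = np.zeros((2, 2))
    conf[0][0] = 8  # true positives
    conf[0][1] = 2  # false negatives
    conf[1][0] = 4  # false positives
    return cal_score(conf)  # ~ (0.8, 0.667, 0.727)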
def main(args):
gold_prefix = args.gold_prefix
pred_prefix = args.pred_prefix
gold_path = gold_prefix
pred_path = pred_prefix
is_tex = args.tex
if not os.path.exists(gold_path):
raise ValueError
if not os.path.exists(pred_path):
raise ValueError
gold_ann_files = glob(os.path.join(gold_path, "*.ann"))
pred_ann_files = glob(os.path.join(pred_path, "*.ann"))
all_conf_mats = {}
dataloader = AnnDataLoader()
for gold_ann_file in tqdm(gold_ann_files):
fname = os.path.basename(gold_ann_file)
pred_ann_file = os.path.join(pred_path, fname)
gold_txt_file = os.path.join(gold_path, os.path.splitext(fname)[0] + ".txt")
pred_txt_file = os.path.join(pred_path, os.path.splitext(fname)[0] + ".txt")
if not pred_ann_file in pred_ann_files:
continue
if not os.path.exists(gold_txt_file):
continue
if not os.path.exists(pred_txt_file):
continue
gold_text, gold_entity, gold_relation, gold_event = dataloader(gold_txt_file, gold_ann_file)
pred_text, pred_entity, pred_relation, pred_event = dataloader(pred_txt_file, pred_ann_file)
converter = EntityConverter(gold_entity, gold_event, pred_entity, pred_event)
conf_mats = {}
# relation
for tag, pr in pred_relation.items():
label = pr["label"]
arg1 = pr["arg1"]
arg2 = pr["arg2"]
if "E" in arg1:
head_label = "Operation"
else:
head_label = pred_entity[arg1]["label"]
if "E" in arg2:
tail_label = "Operation"
else:
tail_label = pred_entity[arg2]["label"]
label_s = "/".join((label, head_label, tail_label))
if not label in conf_mats.keys():
conf_mats[label] = np.zeros((2, 2))
if not label_s in conf_mats.keys():
conf_mats[label_s] = np.zeros((2, 2))
arg1_conv = converter.p2g(arg1)
arg2_conv = converter.p2g(arg2)
assert len(arg1_conv) == 1
assert len(arg2_conv) == 1
tp = False
for tag_gold, gld in gold_relation.items():
a1_gld = gld["arg1"]
a2_gld = gld["arg2"]
label_gld = gld["label"]
if (a1_gld in arg1_conv) and (a2_gld in arg2_conv) and label == label_gld:
tp = True
break
if tp:
conf_mats[label][0, 0] += 1
conf_mats[label_s][0, 0] += 1
else:
conf_mats[label][1, 0] += 1
conf_mats[label_s][1, 0] += 1
for tag, pr in gold_relation.items():
label = pr["label"]
arg1 = pr["arg1"]
arg2 = pr["arg2"]
if "E" in arg1:
head_label = "Operation"
else:
head_label = gold_entity[arg1]["label"]
if "E" in arg2:
tail_label = "Operation"
else:
tail_label = gold_entity[arg2]["label"]
label_s = "/".join((label, head_label, tail_label))
if not label in conf_mats.keys():
conf_mats[label] = np.zeros((2, 2))
if not label_s in conf_mats.keys():
conf_mats[label_s] = np.zeros((2, 2))
arg1_conv = converter.g2p(arg1)
arg2_conv = converter.g2p(arg2)
assert len(arg1_conv) == 1
assert len(arg2_conv) == 1
tp = False
for tag_pred, prd in pred_relation.items():
a1_prd = prd["arg1"]
a2_prd = prd["arg2"]
label_prd = prd["label"]
if (a1_prd in arg1_conv) and (a2_prd in arg2_conv) and label == label_prd:
tp = True
break
if tp:
pass
else:
conf_mats[label][0, 1] += 1
conf_mats[label_s][0, 1] += 1
# event
for tag, pr in pred_event.items():
tag_glds = converter.p2g(tag)
for p in pr:
label = p[0]
if label == "Operation":
continue
label_s = "/".join([p[0], "Operation", pred_entity[p[1]]["label"]])
tp = False
p_conv = (p[0], converter.p2g(p[1]))
if not label in conf_mats.keys():
conf_mats[label] = np.zeros((2, 2))
if not label_s in conf_mats.keys():
conf_mats[label_s] = np.zeros((2, 2))
for t in tag_glds:
judge = [gld[0] == p_conv[0] and gld[1] in p_conv[1] for gld in gold_event[t]]
if any(judge):
tp = True
if tp:
conf_mats[label][0, 0] += 1
conf_mats[label_s][0, 0] += 1
else:
conf_mats[label][1, 0] += 1
conf_mats[label_s][1, 0] += 1
# event
for tag, gld in gold_event.items():
tag_prd = converter.g2p(tag)
for g in gld:
label = g[0]
if label == "Operation":
continue
tp = False
g_conv = (g[0], converter.g2p(g[1]))
                label_s = "/".join([g[0], "Operation", gold_entity[g[1]]["label"]])
if not label in conf_mats.keys():
conf_mats[label] = np.zeros((2, 2))
if not label_s in conf_mats.keys():
conf_mats[label_s] = np.zeros((2, 2))
for t in tag_prd:
judge = [prd[0] == g_conv[0] and prd[1] in g_conv[1] for prd in pred_event[t]]
if any(judge):
tp = True
if tp:
# conf_mats[label][0, 0] += 1
# conf_mats[label_s][0, 0] += 1
pass
else:
conf_mats[label][0, 1] += 1
conf_mats[label_s][0, 1] += 1
for key, val in conf_mats.items():
if not key in all_conf_mats:
all_conf_mats[key] = val
else:
all_conf_mats[key] += val
f1s = {}
ps = {}
rs = {}
if is_tex:
print('name & Precision & Recall & F-score \\\\')
for key in reversed(sorted(all_conf_mats.keys(), key=lambda x: x.count("/"))):
recall, precision, f1 = cal_score(all_conf_mats[key])
f1s[key] = f1
ps[key] = precision
rs[key] = recall
if is_tex:
print('{} & {:.3} & {:.3} & {:.3} \\\\'.format(key,precision,recall,f1))
else:
print(key)
print(all_conf_mats[key])
print("P: {:.5f} R: {:.5f} F1: {:.5f}".format(precision, recall, f1))
cls_conf = [val for key, val in all_conf_mats.items() if key.count("/") == 0]
conf_sum = sum(cls_conf)
recall_all, precision_all, micro_f = cal_score(conf_sum)
if is_tex:
print('{} & {:.3} & {:.3} & {:.3} \\\\'.format('Micro',precision_all,recall_all,micro_f))
else:
print()
print("Micro-F")
print(conf_sum)
print("P: {:.5f} R: {:.5f} F1: {:.5f}".format(precision_all, recall_all, micro_f))
cls_f1 = [f for key, f in f1s.items() if key.count("/") == 0]
cls_p = [f for key, f in ps.items() if key.count("/") == 0]
cls_r = [f for key, f in rs.items() if key.count("/") == 0]
for i, f in enumerate(cls_f1):
if np.isnan(f) or np.isinf(f):
cls_f1[i] = 0.0
for i, f in enumerate(cls_p):
if np.isnan(f) or np.isinf(f):
cls_p[i] = 0.0
for i, f in enumerate(cls_r):
if np.isnan(f) or np.isinf(f):
    cls_r[i] = 0.0
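# --- Hedged sketch (not part of the original script): the per-class lists built
# above (cls_p, cls_r, cls_f1) are presumably aggregated into macro-averaged
# scores next; one minimal way to do that, assuming that intent, would be:
if cls_f1:
    print("Macro P: {:.5f} R: {:.5f} F1: {:.5f}".format(
        sum(cls_p) / len(cls_p), sum(cls_r) / len(cls_r), sum(cls_f1) / len(cls_f1)))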
"""
Homework 5
Helper Functions
"""
import cv2 as cv
import numpy as np
import scipy.optimize
import submission as sub
import numpy.linalg as la
import matplotlib.pyplot as plt
def _epipoles(E):
U, S, V = np.linalg.svd(E)
e1 = V[-1, :]
U, S, V = np.linalg.svd(E.T)
e2 = V[-1, :]
return e1, e2
def displayEpipolarF(I1, I2, F):
e1, e2 = _epipoles(F)
sy, sx, _ = I2.shape
f, [ax1, ax2] = plt.subplots(1, 2, figsize=(12, 9))
ax1.imshow(I1)
ax1.set_title('Select a point in this image')
ax1.set_axis_off()
ax2.imshow(I2)
ax2.set_title('Verify that the corresponding point \n is on the epipolar line in this image')
ax2.set_axis_off()
while True:
plt.sca(ax1)
x, y = plt.ginput(1, mouse_stop=2)[0]
xc, yc = int(x), int(y)
v = np.array([[xc], [yc], [1]])
l = F @ v
s = np.sqrt(l[0]**2+l[1]**2)
if s == 0:
    raise ValueError('Zero line vector in displayEpipolar')
l = l / s
if l[1] != 0:
xs = 0
xe = sx - 1
ys = -(l[0] * xs + l[2]) / l[1]
ye = -(l[0] * xe + l[2]) / l[1]
else:
ys = 0
ye = sy - 1
xs = -(l[1] * ys + l[2]) / l[0]
xe = -(l[1] * ye + l[2]) / l[0]
ax1.plot(x, y, '*', markersize=6, linewidth=2)
ax2.plot([xs, xe], [ys, ye], linewidth=2)
plt.draw()
def _singularize(F):
U, S, V = np.linalg.svd(F)
S[-1] = 0
F = U.dot(np.diag(S).dot(V))
return F
def _objective_F(f, pts1, pts2):
F = _singularize(f.reshape([3, 3]))
num_points = pts1.shape[0]
hpts1 = np.concatenate([pts1, np.ones([num_points, 1])], axis=1)
hpts2 = np.concatenate([pts2, np.ones([num_points, 1])], axis=1)
Fp1 = F.dot(hpts1.T)
FTp2 = F.T.dot(hpts2.T)
r = 0
for fp1, fp2, hp2 in zip(Fp1.T, FTp2.T, hpts2):
r += (hp2.dot(fp1))**2 * (1/(fp1[0]**2 + fp1[1]**2) + 1/(fp2[0]**2 + fp2[1]**2))
return r
def refineF(F, pts1, pts2):
f = scipy.optimize.fmin_powell(
lambda x: _objective_F(x, pts1, pts2), F.reshape([-1]),
maxiter=100000,
maxfun=10000
)
return _singularize(f.reshape([3, 3]))
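# Hedged usage sketch for refineF (not in the original helper file): the random
# correspondences below are purely illustrative; real usage would pass an
# eight-point estimate of F together with the matched image points.
def _demo_refineF():
    rng = np.random.default_rng(0)
    pts1 = rng.uniform(0, 640, size=(20, 2))
    pts2 = pts1 + rng.normal(scale=1.0, size=(20, 2))
    F0 = rng.normal(size=(3, 3))
    F_ref = refineF(F0, pts1, pts2)
    return np.linalg.matrix_rank(F_ref)  # ~2, since _singularize zeroes the last singular value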
def camera2(E):
U,S,V = np.linalg.svd(E)
m = S[:2].mean()
E = U.dot(np.array([[m,0,0], [0,m,0], [0,0,0]])).dot(V)
U,S,V = np.linalg.svd(E)
W = np.array([[0,-1,0], [1,0,0], [0,0,1]])
if np.linalg.det(U.dot(W).dot(V))<0:
W = -W
M2s = np.zeros([3, 4, 4])
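# The fragment is cut off here. A standard continuation, assuming the usual
# four-fold ambiguity when decomposing E (Hartley & Zisserman, Sec. 9.6), would
# fill the four candidate 3x4 extrinsics and return them, e.g.:
# M2s[:, :, 0] = np.concatenate([U.dot(W).dot(V),    U[:, 2:3] / abs(U[:, 2]).max()], axis=1)
# M2s[:, :, 1] = np.concatenate([U.dot(W).dot(V),   -U[:, 2:3] / abs(U[:, 2]).max()], axis=1)
# M2s[:, :, 2] = np.concatenate([U.dot(W.T).dot(V),  U[:, 2:3] / abs(U[:, 2]).max()], axis=1)
# M2s[:, :, 3] = np.concatenate([U.dot(W.T).dot(V), -U[:, 2:3] / abs(U[:, 2]).max()], axis=1)
# return M2s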
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import process_tc as pt
def get_quantiles(values):
stats = {}
stats["mean"], stats["median"] = np.mean(values), | np.median(values) | numpy.median |